linux/mm/page_alloc.c
   1/*
   2 *  linux/mm/page_alloc.c
   3 *
    4 *  Manages the free list; the system allocates free pages here.
   5 *  Note that kmalloc() lives in slab.c
   6 *
   7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   8 *  Swap reorganised 29.12.95, Stephen Tweedie
   9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15 */
  16
  17#include <linux/stddef.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/interrupt.h>
  21#include <linux/pagemap.h>
  22#include <linux/jiffies.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/compiler.h>
  26#include <linux/kernel.h>
  27#include <linux/kmemcheck.h>
  28#include <linux/kasan.h>
  29#include <linux/module.h>
  30#include <linux/suspend.h>
  31#include <linux/pagevec.h>
  32#include <linux/blkdev.h>
  33#include <linux/slab.h>
  34#include <linux/ratelimit.h>
  35#include <linux/oom.h>
  36#include <linux/notifier.h>
  37#include <linux/topology.h>
  38#include <linux/sysctl.h>
  39#include <linux/cpu.h>
  40#include <linux/cpuset.h>
  41#include <linux/memory_hotplug.h>
  42#include <linux/nodemask.h>
  43#include <linux/vmalloc.h>
  44#include <linux/vmstat.h>
  45#include <linux/mempolicy.h>
  46#include <linux/memremap.h>
  47#include <linux/stop_machine.h>
  48#include <linux/sort.h>
  49#include <linux/pfn.h>
  50#include <linux/backing-dev.h>
  51#include <linux/fault-inject.h>
  52#include <linux/page-isolation.h>
  53#include <linux/page_ext.h>
  54#include <linux/debugobjects.h>
  55#include <linux/kmemleak.h>
  56#include <linux/compaction.h>
  57#include <trace/events/kmem.h>
  58#include <trace/events/oom.h>
  59#include <linux/prefetch.h>
  60#include <linux/mm_inline.h>
  61#include <linux/migrate.h>
  62#include <linux/hugetlb.h>
  63#include <linux/sched/rt.h>
  64#include <linux/sched/mm.h>
  65#include <linux/page_owner.h>
  66#include <linux/kthread.h>
  67#include <linux/memcontrol.h>
  68#include <linux/ftrace.h>
  69#include <linux/nmi.h>
  70
  71#include <asm/sections.h>
  72#include <asm/tlbflush.h>
  73#include <asm/div64.h>
  74#include "internal.h"
  75
  76/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
  77static DEFINE_MUTEX(pcp_batch_high_lock);
  78#define MIN_PERCPU_PAGELIST_FRACTION    (8)
  79
  80#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  81DEFINE_PER_CPU(int, numa_node);
  82EXPORT_PER_CPU_SYMBOL(numa_node);
  83#endif
  84
  85#ifdef CONFIG_HAVE_MEMORYLESS_NODES
  86/*
  87 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  88 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  89 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  90 * defined in <linux/topology.h>.
  91 */
  92DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
  93EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  94int _node_numa_mem_[MAX_NUMNODES];
  95#endif
  96
  97/* work_structs for global per-cpu drains */
  98DEFINE_MUTEX(pcpu_drain_mutex);
  99DEFINE_PER_CPU(struct work_struct, pcpu_drain);
 100
 101#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 102volatile unsigned long latent_entropy __latent_entropy;
 103EXPORT_SYMBOL(latent_entropy);
 104#endif
 105
 106/*
 107 * Array of node states.
 108 */
 109nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 110        [N_POSSIBLE] = NODE_MASK_ALL,
 111        [N_ONLINE] = { { [0] = 1UL } },
 112#ifndef CONFIG_NUMA
 113        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 114#ifdef CONFIG_HIGHMEM
 115        [N_HIGH_MEMORY] = { { [0] = 1UL } },
 116#endif
 117        [N_MEMORY] = { { [0] = 1UL } },
 118        [N_CPU] = { { [0] = 1UL } },
 119#endif  /* NUMA */
 120};
 121EXPORT_SYMBOL(node_states);
 122
 123/* Protect totalram_pages and zone->managed_pages */
 124static DEFINE_SPINLOCK(managed_page_count_lock);
 125
 126unsigned long totalram_pages __read_mostly;
 127unsigned long totalreserve_pages __read_mostly;
 128unsigned long totalcma_pages __read_mostly;
 129
 130int percpu_pagelist_fraction;
 131gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 132
 133/*
 134 * A cached value of the page's pageblock's migratetype, used when the page is
 135 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 136 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 137 * Also the migratetype set in the page does not necessarily match the pcplist
 138 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 139 * other index - this ensures that it will be put on the correct CMA freelist.
 140 */
 141static inline int get_pcppage_migratetype(struct page *page)
 142{
 143        return page->index;
 144}
 145
 146static inline void set_pcppage_migratetype(struct page *page, int migratetype)
 147{
 148        page->index = migratetype;
 149}
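
/*
 * Illustrative example of the caching above (a sketch restating the comment,
 * not new behaviour): a page taken from a MIGRATE_CMA pageblock may be handed
 * out via the MIGRATE_MOVABLE pcplist, but its pcppage migratetype stays
 * MIGRATE_CMA.  When the page is later freed from the pcplist, that cached
 * value is what routes it back to the CMA freelist without re-reading the
 * pageblock bitmap (at the cost of possibly being stale, as noted above).
 */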
 150
 151#ifdef CONFIG_PM_SLEEP
 152/*
 153 * The following functions are used by the suspend/hibernate code to temporarily
 154 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 155 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 156 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 157 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 158 * guaranteed not to run in parallel with that modification).
 159 */
 160
 161static gfp_t saved_gfp_mask;
 162
 163void pm_restore_gfp_mask(void)
 164{
 165        WARN_ON(!mutex_is_locked(&pm_mutex));
 166        if (saved_gfp_mask) {
 167                gfp_allowed_mask = saved_gfp_mask;
 168                saved_gfp_mask = 0;
 169        }
 170}
 171
 172void pm_restrict_gfp_mask(void)
 173{
 174        WARN_ON(!mutex_is_locked(&pm_mutex));
 175        WARN_ON(saved_gfp_mask);
 176        saved_gfp_mask = gfp_allowed_mask;
 177        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
 178}
 179
 180bool pm_suspended_storage(void)
 181{
 182        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
 183                return false;
 184        return true;
 185}
 186#endif /* CONFIG_PM_SLEEP */
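
/*
 * Usage sketch (not part of this file): hibernation brackets its image I/O
 * phase roughly like this, holding pm_mutex via lock_system_sleep() so that
 * gfp_allowed_mask cannot change underneath it:
 *
 *	lock_system_sleep();              (takes pm_mutex)
 *	pm_restrict_gfp_mask();           (allocations lose __GFP_IO|__GFP_FS)
 *	do_hibernation_io();              (hypothetical placeholder)
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 */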
 187
 188#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 189unsigned int pageblock_order __read_mostly;
 190#endif
 191
 192static void __free_pages_ok(struct page *page, unsigned int order);
 193
 194/*
 195 * results with 256, 32 in the lowmem_reserve sysctl:
 196 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 197 *      1G machine -> (16M dma, 784M normal, 224M high)
 198 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 199 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 200 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 201 *
 202 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 203 * don't need any ZONE_NORMAL reservation
 204 */
 205int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 206#ifdef CONFIG_ZONE_DMA
 207         256,
 208#endif
 209#ifdef CONFIG_ZONE_DMA32
 210         256,
 211#endif
 212#ifdef CONFIG_HIGHMEM
 213         32,
 214#endif
 215         32,
 216};
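
/*
 * Rough relationship (a sketch of how the ratios above are applied, see
 * setup_per_zone_lowmem_reserve()): for an allocation that may use zone j,
 * a lower zone i keeps back roughly
 *
 *	managed_pages(zones i+1 .. j) / sysctl_lowmem_reserve_ratio[i]
 *
 * pages, which is where the 784M/256 and 224M/32 figures in the comment
 * above come from.
 */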
 217
 218EXPORT_SYMBOL(totalram_pages);
 219
 220static char * const zone_names[MAX_NR_ZONES] = {
 221#ifdef CONFIG_ZONE_DMA
 222         "DMA",
 223#endif
 224#ifdef CONFIG_ZONE_DMA32
 225         "DMA32",
 226#endif
 227         "Normal",
 228#ifdef CONFIG_HIGHMEM
 229         "HighMem",
 230#endif
 231         "Movable",
 232#ifdef CONFIG_ZONE_DEVICE
 233         "Device",
 234#endif
 235};
 236
 237char * const migratetype_names[MIGRATE_TYPES] = {
 238        "Unmovable",
 239        "Movable",
 240        "Reclaimable",
 241        "HighAtomic",
 242#ifdef CONFIG_CMA
 243        "CMA",
 244#endif
 245#ifdef CONFIG_MEMORY_ISOLATION
 246        "Isolate",
 247#endif
 248};
 249
 250compound_page_dtor * const compound_page_dtors[] = {
 251        NULL,
 252        free_compound_page,
 253#ifdef CONFIG_HUGETLB_PAGE
 254        free_huge_page,
 255#endif
 256#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 257        free_transhuge_page,
 258#endif
 259};
 260
 261int min_free_kbytes = 1024;
 262int user_min_free_kbytes = -1;
 263int watermark_scale_factor = 10;
 264
 265static unsigned long __meminitdata nr_kernel_pages;
 266static unsigned long __meminitdata nr_all_pages;
 267static unsigned long __meminitdata dma_reserve;
 268
 269#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 270static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 271static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 272static unsigned long __initdata required_kernelcore;
 273static unsigned long __initdata required_movablecore;
 274static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 275static bool mirrored_kernelcore;
 276
 277/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 278int movable_zone;
 279EXPORT_SYMBOL(movable_zone);
 280#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 281
 282#if MAX_NUMNODES > 1
 283int nr_node_ids __read_mostly = MAX_NUMNODES;
 284int nr_online_nodes __read_mostly = 1;
 285EXPORT_SYMBOL(nr_node_ids);
 286EXPORT_SYMBOL(nr_online_nodes);
 287#endif
 288
 289int page_group_by_mobility_disabled __read_mostly;
 290
 291#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 292static inline void reset_deferred_meminit(pg_data_t *pgdat)
 293{
 294        unsigned long max_initialise;
 295        unsigned long reserved_lowmem;
 296
 297        /*
  298         * Initialise at least 2G of a node, but also take into account
  299         * the two large system hashes that can take up 1GB for 0.25TB/node.
 300         */
 301        max_initialise = max(2UL << (30 - PAGE_SHIFT),
 302                (pgdat->node_spanned_pages >> 8));
 303
 304        /*
  305         * Compensate for all the memblock reservations (e.g. crash kernel)
 306         * from the initial estimation to make sure we will initialize enough
 307         * memory to boot.
 308         */
 309        reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
 310                        pgdat->node_start_pfn + max_initialise);
 311        max_initialise += reserved_lowmem;
 312
 313        pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 314        pgdat->first_deferred_pfn = ULONG_MAX;
 315}
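
/*
 * Worked example (a sketch, assuming 4K pages): for a node spanning 1TB,
 * node_spanned_pages >> 8 is 2^20 pages (4G), which beats the 2G floor, so
 * about 4G (plus any memblock reservations inside that range, e.g. a crash
 * kernel) is initialised up front and the remainder is left for
 * deferred_init_memmap() to initialise later in boot.
 */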
 316
 317/* Returns true if the struct page for the pfn is uninitialised */
 318static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 319{
 320        int nid = early_pfn_to_nid(pfn);
 321
 322        if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 323                return true;
 324
 325        return false;
 326}
 327
 328/*
 329 * Returns false when the remaining initialisation should be deferred until
 330 * later in the boot cycle when it can be parallelised.
 331 */
 332static inline bool update_defer_init(pg_data_t *pgdat,
 333                                unsigned long pfn, unsigned long zone_end,
 334                                unsigned long *nr_initialised)
 335{
  336        /* Always populate low zones for address-constrained allocations */
 337        if (zone_end < pgdat_end_pfn(pgdat))
 338                return true;
 339        (*nr_initialised)++;
 340        if ((*nr_initialised > pgdat->static_init_size) &&
 341            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 342                pgdat->first_deferred_pfn = pfn;
 343                return false;
 344        }
 345
 346        return true;
 347}
 348#else
 349static inline void reset_deferred_meminit(pg_data_t *pgdat)
 350{
 351}
 352
 353static inline bool early_page_uninitialised(unsigned long pfn)
 354{
 355        return false;
 356}
 357
 358static inline bool update_defer_init(pg_data_t *pgdat,
 359                                unsigned long pfn, unsigned long zone_end,
 360                                unsigned long *nr_initialised)
 361{
 362        return true;
 363}
 364#endif
 365
 366/* Return a pointer to the bitmap storing bits affecting a block of pages */
 367static inline unsigned long *get_pageblock_bitmap(struct page *page,
 368                                                        unsigned long pfn)
 369{
 370#ifdef CONFIG_SPARSEMEM
 371        return __pfn_to_section(pfn)->pageblock_flags;
 372#else
 373        return page_zone(page)->pageblock_flags;
 374#endif /* CONFIG_SPARSEMEM */
 375}
 376
 377static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
 378{
 379#ifdef CONFIG_SPARSEMEM
 380        pfn &= (PAGES_PER_SECTION-1);
 381        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 382#else
 383        pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
 384        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 385#endif /* CONFIG_SPARSEMEM */
 386}
 387
 388/**
 389 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 390 * @page: The page within the block of interest
 391 * @pfn: The target page frame number
 392 * @end_bitidx: The last bit of interest to retrieve
 393 * @mask: mask of bits that the caller is interested in
 394 *
 395 * Return: pageblock_bits flags
 396 */
 397static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
 398                                        unsigned long pfn,
 399                                        unsigned long end_bitidx,
 400                                        unsigned long mask)
 401{
 402        unsigned long *bitmap;
 403        unsigned long bitidx, word_bitidx;
 404        unsigned long word;
 405
 406        bitmap = get_pageblock_bitmap(page, pfn);
 407        bitidx = pfn_to_bitidx(page, pfn);
 408        word_bitidx = bitidx / BITS_PER_LONG;
 409        bitidx &= (BITS_PER_LONG-1);
 410
 411        word = bitmap[word_bitidx];
 412        bitidx += end_bitidx;
 413        return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
 414}
 415
 416unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 417                                        unsigned long end_bitidx,
 418                                        unsigned long mask)
 419{
 420        return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
 421}
 422
 423static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
 424{
 425        return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
 426}
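
/*
 * Worked example (a sketch, assuming SPARSEMEM with PAGES_PER_SECTION of
 * 32768, pageblock_order of 9, NR_PAGEBLOCK_BITS of 4 and 64-bit longs):
 * for a pfn whose offset within its section is 0x2345,
 *
 *	0x2345 >> 9          = 17     (pageblock 17 of the section)
 *	bitidx  = 17 * 4     = 68
 *	word_bitidx = 68/64  = 1      (second unsigned long of the bitmap)
 *	bitidx &= 63         -> 4     (bit offset within that word)
 *
 * The NR_PAGEBLOCK_BITS-wide field for that block lives at that offset;
 * get_pfnblock_migratetype() extracts the migratetype bits from it with
 * MIGRATETYPE_MASK via __get_pfnblock_flags_mask() above.
 */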
 427
 428/**
 429 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 430 * @page: The page within the block of interest
 431 * @flags: The flags to set
 432 * @pfn: The target page frame number
 433 * @end_bitidx: The last bit of interest
 434 * @mask: mask of bits that the caller is interested in
 435 */
 436void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 437                                        unsigned long pfn,
 438                                        unsigned long end_bitidx,
 439                                        unsigned long mask)
 440{
 441        unsigned long *bitmap;
 442        unsigned long bitidx, word_bitidx;
 443        unsigned long old_word, word;
 444
 445        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
 446
 447        bitmap = get_pageblock_bitmap(page, pfn);
 448        bitidx = pfn_to_bitidx(page, pfn);
 449        word_bitidx = bitidx / BITS_PER_LONG;
 450        bitidx &= (BITS_PER_LONG-1);
 451
 452        VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
 453
 454        bitidx += end_bitidx;
 455        mask <<= (BITS_PER_LONG - bitidx - 1);
 456        flags <<= (BITS_PER_LONG - bitidx - 1);
 457
 458        word = READ_ONCE(bitmap[word_bitidx]);
 459        for (;;) {
 460                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 461                if (word == old_word)
 462                        break;
 463                word = old_word;
 464        }
 465}
 466
 467void set_pageblock_migratetype(struct page *page, int migratetype)
 468{
 469        if (unlikely(page_group_by_mobility_disabled &&
 470                     migratetype < MIGRATE_PCPTYPES))
 471                migratetype = MIGRATE_UNMOVABLE;
 472
 473        set_pageblock_flags_group(page, (unsigned long)migratetype,
 474                                        PB_migrate, PB_migrate_end);
 475}
 476
 477#ifdef CONFIG_DEBUG_VM
 478static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 479{
 480        int ret = 0;
 481        unsigned seq;
 482        unsigned long pfn = page_to_pfn(page);
 483        unsigned long sp, start_pfn;
 484
 485        do {
 486                seq = zone_span_seqbegin(zone);
 487                start_pfn = zone->zone_start_pfn;
 488                sp = zone->spanned_pages;
 489                if (!zone_spans_pfn(zone, pfn))
 490                        ret = 1;
 491        } while (zone_span_seqretry(zone, seq));
 492
 493        if (ret)
 494                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
 495                        pfn, zone_to_nid(zone), zone->name,
 496                        start_pfn, start_pfn + sp);
 497
 498        return ret;
 499}
 500
 501static int page_is_consistent(struct zone *zone, struct page *page)
 502{
 503        if (!pfn_valid_within(page_to_pfn(page)))
 504                return 0;
 505        if (zone != page_zone(page))
 506                return 0;
 507
 508        return 1;
 509}
 510/*
 511 * Temporary debugging check for pages not lying within a given zone.
 512 */
 513static int __maybe_unused bad_range(struct zone *zone, struct page *page)
 514{
 515        if (page_outside_zone_boundaries(zone, page))
 516                return 1;
 517        if (!page_is_consistent(zone, page))
 518                return 1;
 519
 520        return 0;
 521}
 522#else
 523static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
 524{
 525        return 0;
 526}
 527#endif
 528
 529static void bad_page(struct page *page, const char *reason,
 530                unsigned long bad_flags)
 531{
 532        static unsigned long resume;
 533        static unsigned long nr_shown;
 534        static unsigned long nr_unshown;
 535
 536        /*
 537         * Allow a burst of 60 reports, then keep quiet for that minute;
 538         * or allow a steady drip of one report per second.
 539         */
 540        if (nr_shown == 60) {
 541                if (time_before(jiffies, resume)) {
 542                        nr_unshown++;
 543                        goto out;
 544                }
 545                if (nr_unshown) {
 546                        pr_alert(
 547                              "BUG: Bad page state: %lu messages suppressed\n",
 548                                nr_unshown);
 549                        nr_unshown = 0;
 550                }
 551                nr_shown = 0;
 552        }
 553        if (nr_shown++ == 0)
 554                resume = jiffies + 60 * HZ;
 555
 556        pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
 557                current->comm, page_to_pfn(page));
 558        __dump_page(page, reason);
 559        bad_flags &= page->flags;
 560        if (bad_flags)
 561                pr_alert("bad because of flags: %#lx(%pGp)\n",
 562                                                bad_flags, &bad_flags);
 563        dump_page_owner(page);
 564
 565        print_modules();
 566        dump_stack();
 567out:
 568        /* Leave bad fields for debug, except PageBuddy could make trouble */
 569        page_mapcount_reset(page); /* remove PageBuddy */
 570        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 571}
 572
 573/*
 574 * Higher-order pages are called "compound pages".  They are structured thusly:
 575 *
  576 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 577 *
 578 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
  579 * in bit 0 of page->compound_head. The rest of the bits point to the head page.
 580 *
 581 * The first tail page's ->compound_dtor holds the offset in array of compound
 582 * page destructors. See compound_page_dtors.
 583 *
 584 * The first tail page's ->compound_order holds the order of allocation.
 585 * This usage means that zero-order pages may not be compound.
 586 */
 587
 588void free_compound_page(struct page *page)
 589{
 590        __free_pages_ok(page, compound_order(page));
 591}
 592
 593void prep_compound_page(struct page *page, unsigned int order)
 594{
 595        int i;
 596        int nr_pages = 1 << order;
 597
 598        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
 599        set_compound_order(page, order);
 600        __SetPageHead(page);
 601        for (i = 1; i < nr_pages; i++) {
 602                struct page *p = page + i;
 603                set_page_count(p, 0);
 604                p->mapping = TAIL_MAPPING;
 605                set_compound_head(p, page);
 606        }
 607        atomic_set(compound_mapcount_ptr(page), -1);
 608}
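
/*
 * Layout sketch after prep_compound_page(page, 2) (order 2, i.e. 4 pages;
 * an illustration of the code above, not a normative description):
 *
 *	page[0]: PG_head set; holds the refcount for the whole block
 *	page[1]: compound_head = page[0]|1, mapping = TAIL_MAPPING, and the
 *	         compound_dtor / compound_order / compound_mapcount fields
 *	page[2]: compound_head = page[0]|1, mapping = TAIL_MAPPING
 *	page[3]: compound_head = page[0]|1, mapping = TAIL_MAPPING
 *
 * compound_head() on any of the four returns page[0], and
 * compound_order(page[0]) reads back 2 from page[1].
 */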
 609
 610#ifdef CONFIG_DEBUG_PAGEALLOC
 611unsigned int _debug_guardpage_minorder;
 612bool _debug_pagealloc_enabled __read_mostly
 613                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
 614EXPORT_SYMBOL(_debug_pagealloc_enabled);
 615bool _debug_guardpage_enabled __read_mostly;
 616
 617static int __init early_debug_pagealloc(char *buf)
 618{
 619        if (!buf)
 620                return -EINVAL;
 621        return kstrtobool(buf, &_debug_pagealloc_enabled);
 622}
 623early_param("debug_pagealloc", early_debug_pagealloc);
 624
 625static bool need_debug_guardpage(void)
 626{
  627        /* If we don't use debug_pagealloc, we don't need guard pages */
 628        if (!debug_pagealloc_enabled())
 629                return false;
 630
 631        if (!debug_guardpage_minorder())
 632                return false;
 633
 634        return true;
 635}
 636
 637static void init_debug_guardpage(void)
 638{
 639        if (!debug_pagealloc_enabled())
 640                return;
 641
 642        if (!debug_guardpage_minorder())
 643                return;
 644
 645        _debug_guardpage_enabled = true;
 646}
 647
 648struct page_ext_operations debug_guardpage_ops = {
 649        .need = need_debug_guardpage,
 650        .init = init_debug_guardpage,
 651};
 652
 653static int __init debug_guardpage_minorder_setup(char *buf)
 654{
 655        unsigned long res;
 656
 657        if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
 658                pr_err("Bad debug_guardpage_minorder value\n");
 659                return 0;
 660        }
 661        _debug_guardpage_minorder = res;
 662        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
 663        return 0;
 664}
 665early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 666
 667static inline bool set_page_guard(struct zone *zone, struct page *page,
 668                                unsigned int order, int migratetype)
 669{
 670        struct page_ext *page_ext;
 671
 672        if (!debug_guardpage_enabled())
 673                return false;
 674
 675        if (order >= debug_guardpage_minorder())
 676                return false;
 677
 678        page_ext = lookup_page_ext(page);
 679        if (unlikely(!page_ext))
 680                return false;
 681
 682        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 683
 684        INIT_LIST_HEAD(&page->lru);
 685        set_page_private(page, order);
 686        /* Guard pages are not available for any usage */
 687        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 688
 689        return true;
 690}
 691
 692static inline void clear_page_guard(struct zone *zone, struct page *page,
 693                                unsigned int order, int migratetype)
 694{
 695        struct page_ext *page_ext;
 696
 697        if (!debug_guardpage_enabled())
 698                return;
 699
 700        page_ext = lookup_page_ext(page);
 701        if (unlikely(!page_ext))
 702                return;
 703
 704        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 705
 706        set_page_private(page, 0);
 707        if (!is_migrate_isolate(migratetype))
 708                __mod_zone_freepage_state(zone, (1 << order), migratetype);
 709}
 710#else
 711struct page_ext_operations debug_guardpage_ops;
 712static inline bool set_page_guard(struct zone *zone, struct page *page,
 713                        unsigned int order, int migratetype) { return false; }
 714static inline void clear_page_guard(struct zone *zone, struct page *page,
 715                                unsigned int order, int migratetype) {}
 716#endif
 717
 718static inline void set_page_order(struct page *page, unsigned int order)
 719{
 720        set_page_private(page, order);
 721        __SetPageBuddy(page);
 722}
 723
 724static inline void rmv_page_order(struct page *page)
 725{
 726        __ClearPageBuddy(page);
 727        set_page_private(page, 0);
 728}
 729
 730/*
  731 * This function checks whether a page is free && is the buddy.
  732 * We can coalesce a page and its buddy if
 733 * (a) the buddy is not in a hole (check before calling!) &&
 734 * (b) the buddy is in the buddy system &&
 735 * (c) a page and its buddy have the same order &&
 736 * (d) a page and its buddy are in the same zone.
 737 *
  738 * For recording whether a page is in the buddy system, we set ->_mapcount
  739 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 740 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 741 * serialized by zone->lock.
 742 *
 743 * For recording page's order, we use page_private(page).
 744 */
 745static inline int page_is_buddy(struct page *page, struct page *buddy,
 746                                                        unsigned int order)
 747{
 748        if (page_is_guard(buddy) && page_order(buddy) == order) {
 749                if (page_zone_id(page) != page_zone_id(buddy))
 750                        return 0;
 751
 752                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 753
 754                return 1;
 755        }
 756
 757        if (PageBuddy(buddy) && page_order(buddy) == order) {
 758                /*
 759                 * zone check is done late to avoid uselessly
 760                 * calculating zone/node ids for pages that could
 761                 * never merge.
 762                 */
 763                if (page_zone_id(page) != page_zone_id(buddy))
 764                        return 0;
 765
 766                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 767
 768                return 1;
 769        }
 770        return 0;
 771}
 772
 773/*
 774 * Freeing function for a buddy system allocator.
 775 *
  776 * The concept of a buddy system is to maintain a direct-mapped table
 777 * (containing bit values) for memory blocks of various "orders".
 778 * The bottom level table contains the map for the smallest allocatable
 779 * units of memory (here, pages), and each level above it describes
 780 * pairs of units from the levels below, hence, "buddies".
 781 * At a high level, all that happens here is marking the table entry
 782 * at the bottom level available, and propagating the changes upward
 783 * as necessary, plus some accounting needed to play nicely with other
 784 * parts of the VM system.
  785 * At each level, we keep a list of pages, which are heads of contiguous
  786 * free pages of length (1 << order) and marked with _mapcount
 787 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 788 * field.
 789 * So when we are allocating or freeing one, we can derive the state of the
 790 * other.  That is, if we allocate a small block, and both were
 791 * free, the remainder of the region must be split into blocks.
 792 * If a block is freed, and its buddy is also free, then this
 793 * triggers coalescing into a block of larger size.
 794 *
 795 * -- nyc
 796 */
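
/*
 * Worked example (a sketch; __find_buddy_pfn() pairs a block with its buddy
 * by flipping bit 'order' of the pfn):
 *
 *	free pfn 8 at order 0: buddy_pfn = 8 ^ 1 = 9
 *	    pfn 9 free at order 0?  merge: combined_pfn = 8 & 9 = 8, order = 1
 *	    buddy of (8, order 1) is 8 ^ 2 = 10
 *	    pfns 10-11 free as an order-1 block?  merge to (8, order 2), etc.
 *
 * Merging stops at the first buddy that is not free (or must not be merged
 * with, e.g. across an isolated pageblock), and the result is put on
 * free_area[order].free_list[].
 */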
 797
 798static inline void __free_one_page(struct page *page,
 799                unsigned long pfn,
 800                struct zone *zone, unsigned int order,
 801                int migratetype)
 802{
 803        unsigned long combined_pfn;
 804        unsigned long uninitialized_var(buddy_pfn);
 805        struct page *buddy;
 806        unsigned int max_order;
 807
 808        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
 809
 810        VM_BUG_ON(!zone_is_initialized(zone));
 811        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
 812
 813        VM_BUG_ON(migratetype == -1);
 814        if (likely(!is_migrate_isolate(migratetype)))
 815                __mod_zone_freepage_state(zone, 1 << order, migratetype);
 816
 817        VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
 818        VM_BUG_ON_PAGE(bad_range(zone, page), page);
 819
 820continue_merging:
 821        while (order < max_order - 1) {
 822                buddy_pfn = __find_buddy_pfn(pfn, order);
 823                buddy = page + (buddy_pfn - pfn);
 824
 825                if (!pfn_valid_within(buddy_pfn))
 826                        goto done_merging;
 827                if (!page_is_buddy(page, buddy, order))
 828                        goto done_merging;
 829                /*
 830                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 831                 * merge with it and move up one order.
 832                 */
 833                if (page_is_guard(buddy)) {
 834                        clear_page_guard(zone, buddy, order, migratetype);
 835                } else {
 836                        list_del(&buddy->lru);
 837                        zone->free_area[order].nr_free--;
 838                        rmv_page_order(buddy);
 839                }
 840                combined_pfn = buddy_pfn & pfn;
 841                page = page + (combined_pfn - pfn);
 842                pfn = combined_pfn;
 843                order++;
 844        }
 845        if (max_order < MAX_ORDER) {
 846                /* If we are here, it means order is >= pageblock_order.
 847                 * We want to prevent merge between freepages on isolate
 848                 * pageblock and normal pageblock. Without this, pageblock
 849                 * isolation could cause incorrect freepage or CMA accounting.
 850                 *
 851                 * We don't want to hit this code for the more frequent
 852                 * low-order merging.
 853                 */
 854                if (unlikely(has_isolate_pageblock(zone))) {
 855                        int buddy_mt;
 856
 857                        buddy_pfn = __find_buddy_pfn(pfn, order);
 858                        buddy = page + (buddy_pfn - pfn);
 859                        buddy_mt = get_pageblock_migratetype(buddy);
 860
 861                        if (migratetype != buddy_mt
 862                                        && (is_migrate_isolate(migratetype) ||
 863                                                is_migrate_isolate(buddy_mt)))
 864                                goto done_merging;
 865                }
 866                max_order++;
 867                goto continue_merging;
 868        }
 869
 870done_merging:
 871        set_page_order(page, order);
 872
 873        /*
 874         * If this is not the largest possible page, check if the buddy
 875         * of the next-highest order is free. If it is, it's possible
  876         * that pages are being freed that will coalesce soon. In case
 877         * that is happening, add the free page to the tail of the list
 878         * so it's less likely to be used soon and more likely to be merged
  879         * as a higher order page.
 880         */
 881        if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
 882                struct page *higher_page, *higher_buddy;
 883                combined_pfn = buddy_pfn & pfn;
 884                higher_page = page + (combined_pfn - pfn);
 885                buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
 886                higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 887                if (pfn_valid_within(buddy_pfn) &&
 888                    page_is_buddy(higher_page, higher_buddy, order + 1)) {
 889                        list_add_tail(&page->lru,
 890                                &zone->free_area[order].free_list[migratetype]);
 891                        goto out;
 892                }
 893        }
 894
 895        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
 896out:
 897        zone->free_area[order].nr_free++;
 898}
 899
 900/*
 901 * A bad page could be due to a number of fields. Instead of multiple branches,
  902 * try to check multiple fields with one check. The caller must do a detailed
 903 * check if necessary.
 904 */
 905static inline bool page_expected_state(struct page *page,
 906                                        unsigned long check_flags)
 907{
 908        if (unlikely(atomic_read(&page->_mapcount) != -1))
 909                return false;
 910
 911        if (unlikely((unsigned long)page->mapping |
 912                        page_ref_count(page) |
 913#ifdef CONFIG_MEMCG
 914                        (unsigned long)page->mem_cgroup |
 915#endif
 916                        (page->flags & check_flags)))
 917                return false;
 918
 919        return true;
 920}
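
/*
 * The single test in page_expected_state() is a branch-saving trick (sketch):
 * instead of
 *
 *	if (page->mapping) ...
 *	if (page_ref_count(page)) ...
 *	if (page->flags & check_flags) ...
 *
 * the fields are OR-ed into one word so one comparison covers the common
 * "everything clear" case; only when it fails does free_pages_check_bad()
 * below redo the work field by field to name the offender.
 */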
 921
 922static void free_pages_check_bad(struct page *page)
 923{
 924        const char *bad_reason;
 925        unsigned long bad_flags;
 926
 927        bad_reason = NULL;
 928        bad_flags = 0;
 929
 930        if (unlikely(atomic_read(&page->_mapcount) != -1))
 931                bad_reason = "nonzero mapcount";
 932        if (unlikely(page->mapping != NULL))
 933                bad_reason = "non-NULL mapping";
 934        if (unlikely(page_ref_count(page) != 0))
 935                bad_reason = "nonzero _refcount";
 936        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
 937                bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
 938                bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
 939        }
 940#ifdef CONFIG_MEMCG
 941        if (unlikely(page->mem_cgroup))
 942                bad_reason = "page still charged to cgroup";
 943#endif
 944        bad_page(page, bad_reason, bad_flags);
 945}
 946
 947static inline int free_pages_check(struct page *page)
 948{
 949        if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
 950                return 0;
 951
 952        /* Something has gone sideways, find it */
 953        free_pages_check_bad(page);
 954        return 1;
 955}
 956
 957static int free_tail_pages_check(struct page *head_page, struct page *page)
 958{
 959        int ret = 1;
 960
 961        /*
  962         * We rely on page->lru.next never having bit 0 set, unless the page
 963         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
 964         */
 965        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
 966
 967        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
 968                ret = 0;
 969                goto out;
 970        }
 971        switch (page - head_page) {
 972        case 1:
 973                /* the first tail page: ->mapping is compound_mapcount() */
 974                if (unlikely(compound_mapcount(page))) {
 975                        bad_page(page, "nonzero compound_mapcount", 0);
 976                        goto out;
 977                }
 978                break;
 979        case 2:
 980                /*
 981                 * the second tail page: ->mapping is
 982                 * page_deferred_list().next -- ignore value.
 983                 */
 984                break;
 985        default:
 986                if (page->mapping != TAIL_MAPPING) {
 987                        bad_page(page, "corrupted mapping in tail page", 0);
 988                        goto out;
 989                }
 990                break;
 991        }
 992        if (unlikely(!PageTail(page))) {
 993                bad_page(page, "PageTail not set", 0);
 994                goto out;
 995        }
 996        if (unlikely(compound_head(page) != head_page)) {
 997                bad_page(page, "compound_head not consistent", 0);
 998                goto out;
 999        }
1000        ret = 0;
1001out:
1002        page->mapping = NULL;
1003        clear_compound_head(page);
1004        return ret;
1005}
1006
1007static __always_inline bool free_pages_prepare(struct page *page,
1008                                        unsigned int order, bool check_free)
1009{
1010        int bad = 0;
1011
1012        VM_BUG_ON_PAGE(PageTail(page), page);
1013
1014        trace_mm_page_free(page, order);
1015        kmemcheck_free_shadow(page, order);
1016
1017        /*
1018         * Check tail pages before head page information is cleared to
1019         * avoid checking PageCompound for order-0 pages.
1020         */
1021        if (unlikely(order)) {
1022                bool compound = PageCompound(page);
1023                int i;
1024
1025                VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1026
1027                if (compound)
1028                        ClearPageDoubleMap(page);
1029                for (i = 1; i < (1 << order); i++) {
1030                        if (compound)
1031                                bad += free_tail_pages_check(page, page + i);
1032                        if (unlikely(free_pages_check(page + i))) {
1033                                bad++;
1034                                continue;
1035                        }
1036                        (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1037                }
1038        }
1039        if (PageMappingFlags(page))
1040                page->mapping = NULL;
1041        if (memcg_kmem_enabled() && PageKmemcg(page))
1042                memcg_kmem_uncharge(page, order);
1043        if (check_free)
1044                bad += free_pages_check(page);
1045        if (bad)
1046                return false;
1047
1048        page_cpupid_reset_last(page);
1049        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1050        reset_page_owner(page, order);
1051
1052        if (!PageHighMem(page)) {
1053                debug_check_no_locks_freed(page_address(page),
1054                                           PAGE_SIZE << order);
1055                debug_check_no_obj_freed(page_address(page),
1056                                           PAGE_SIZE << order);
1057        }
1058        arch_free_page(page, order);
1059        kernel_poison_pages(page, 1 << order, 0);
1060        kernel_map_pages(page, 1 << order, 0);
1061        kasan_free_pages(page, order);
1062
1063        return true;
1064}
1065
1066#ifdef CONFIG_DEBUG_VM
1067static inline bool free_pcp_prepare(struct page *page)
1068{
1069        return free_pages_prepare(page, 0, true);
1070}
1071
1072static inline bool bulkfree_pcp_prepare(struct page *page)
1073{
1074        return false;
1075}
1076#else
1077static bool free_pcp_prepare(struct page *page)
1078{
1079        return free_pages_prepare(page, 0, false);
1080}
1081
1082static bool bulkfree_pcp_prepare(struct page *page)
1083{
1084        return free_pages_check(page);
1085}
1086#endif /* CONFIG_DEBUG_VM */
1087
1088/*
1089 * Frees a number of pages from the PCP lists
 1090 * Assumes all pages on the list are in the same zone, and of the same order.
1091 * count is the number of pages to free.
1092 *
1093 * If the zone was previously in an "all pages pinned" state then look to
1094 * see if this freeing clears that state.
1095 *
1096 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1097 * pinned" detection logic.
1098 */
1099static void free_pcppages_bulk(struct zone *zone, int count,
1100                                        struct per_cpu_pages *pcp)
1101{
1102        int migratetype = 0;
1103        int batch_free = 0;
1104        bool isolated_pageblocks;
1105
1106        spin_lock(&zone->lock);
1107        isolated_pageblocks = has_isolate_pageblock(zone);
1108
1109        while (count) {
1110                struct page *page;
1111                struct list_head *list;
1112
1113                /*
1114                 * Remove pages from lists in a round-robin fashion. A
1115                 * batch_free count is maintained that is incremented when an
1116                 * empty list is encountered.  This is so more pages are freed
1117                 * off fuller lists instead of spinning excessively around empty
1118                 * lists
1119                 */
1120                do {
1121                        batch_free++;
1122                        if (++migratetype == MIGRATE_PCPTYPES)
1123                                migratetype = 0;
1124                        list = &pcp->lists[migratetype];
1125                } while (list_empty(list));
1126
1127                /* This is the only non-empty list. Free them all. */
1128                if (batch_free == MIGRATE_PCPTYPES)
1129                        batch_free = count;
1130
1131                do {
1132                        int mt; /* migratetype of the to-be-freed page */
1133
1134                        page = list_last_entry(list, struct page, lru);
 1135                        /* must delete, as __free_one_page() manipulates the list */
1136                        list_del(&page->lru);
1137
1138                        mt = get_pcppage_migratetype(page);
1139                        /* MIGRATE_ISOLATE page should not go to pcplists */
1140                        VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1141                        /* Pageblock could have been isolated meanwhile */
1142                        if (unlikely(isolated_pageblocks))
1143                                mt = get_pageblock_migratetype(page);
1144
1145                        if (bulkfree_pcp_prepare(page))
1146                                continue;
1147
1148                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
1149                        trace_mm_page_pcpu_drain(page, 0, mt);
1150                } while (--count && --batch_free && !list_empty(list));
1151        }
1152        spin_unlock(&zone->lock);
1153}
1154
1155static void free_one_page(struct zone *zone,
1156                                struct page *page, unsigned long pfn,
1157                                unsigned int order,
1158                                int migratetype)
1159{
1160        spin_lock(&zone->lock);
1161        if (unlikely(has_isolate_pageblock(zone) ||
1162                is_migrate_isolate(migratetype))) {
1163                migratetype = get_pfnblock_migratetype(page, pfn);
1164        }
1165        __free_one_page(page, pfn, zone, order, migratetype);
1166        spin_unlock(&zone->lock);
1167}
1168
1169static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1170                                unsigned long zone, int nid)
1171{
1172        set_page_links(page, zone, nid, pfn);
1173        init_page_count(page);
1174        page_mapcount_reset(page);
1175        page_cpupid_reset_last(page);
1176
1177        INIT_LIST_HEAD(&page->lru);
1178#ifdef WANT_PAGE_VIRTUAL
1179        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1180        if (!is_highmem_idx(zone))
1181                set_page_address(page, __va(pfn << PAGE_SHIFT));
1182#endif
1183}
1184
1185static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1186                                        int nid)
1187{
1188        return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
1189}
1190
1191#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1192static void init_reserved_page(unsigned long pfn)
1193{
1194        pg_data_t *pgdat;
1195        int nid, zid;
1196
1197        if (!early_page_uninitialised(pfn))
1198                return;
1199
1200        nid = early_pfn_to_nid(pfn);
1201        pgdat = NODE_DATA(nid);
1202
1203        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1204                struct zone *zone = &pgdat->node_zones[zid];
1205
1206                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1207                        break;
1208        }
1209        __init_single_pfn(pfn, zid, nid);
1210}
1211#else
1212static inline void init_reserved_page(unsigned long pfn)
1213{
1214}
1215#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1216
1217/*
1218 * Initialised pages do not have PageReserved set. This function is
1219 * called for each range allocated by the bootmem allocator and
1220 * marks the pages PageReserved. The remaining valid pages are later
1221 * sent to the buddy page allocator.
1222 */
1223void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1224{
1225        unsigned long start_pfn = PFN_DOWN(start);
1226        unsigned long end_pfn = PFN_UP(end);
1227
1228        for (; start_pfn < end_pfn; start_pfn++) {
1229                if (pfn_valid(start_pfn)) {
1230                        struct page *page = pfn_to_page(start_pfn);
1231
1232                        init_reserved_page(start_pfn);
1233
1234                        /* Avoid false-positive PageTail() */
1235                        INIT_LIST_HEAD(&page->lru);
1236
1237                        SetPageReserved(page);
1238                }
1239        }
1240}
1241
1242static void __free_pages_ok(struct page *page, unsigned int order)
1243{
1244        unsigned long flags;
1245        int migratetype;
1246        unsigned long pfn = page_to_pfn(page);
1247
1248        if (!free_pages_prepare(page, order, true))
1249                return;
1250
1251        migratetype = get_pfnblock_migratetype(page, pfn);
1252        local_irq_save(flags);
1253        __count_vm_events(PGFREE, 1 << order);
1254        free_one_page(page_zone(page), page, pfn, order, migratetype);
1255        local_irq_restore(flags);
1256}
1257
1258static void __init __free_pages_boot_core(struct page *page, unsigned int order)
1259{
1260        unsigned int nr_pages = 1 << order;
1261        struct page *p = page;
1262        unsigned int loop;
1263
1264        prefetchw(p);
1265        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1266                prefetchw(p + 1);
1267                __ClearPageReserved(p);
1268                set_page_count(p, 0);
1269        }
1270        __ClearPageReserved(p);
1271        set_page_count(p, 0);
1272
1273        page_zone(page)->managed_pages += nr_pages;
1274        set_page_refcounted(page);
1275        __free_pages(page, order);
1276}
1277
1278#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
1279        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
1280
1281static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1282
1283int __meminit early_pfn_to_nid(unsigned long pfn)
1284{
1285        static DEFINE_SPINLOCK(early_pfn_lock);
1286        int nid;
1287
1288        spin_lock(&early_pfn_lock);
1289        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1290        if (nid < 0)
1291                nid = first_online_node;
1292        spin_unlock(&early_pfn_lock);
1293
1294        return nid;
1295}
1296#endif
1297
1298#ifdef CONFIG_NODES_SPAN_OTHER_NODES
1299static inline bool __meminit __maybe_unused
1300meminit_pfn_in_nid(unsigned long pfn, int node,
1301                   struct mminit_pfnnid_cache *state)
1302{
1303        int nid;
1304
1305        nid = __early_pfn_to_nid(pfn, state);
1306        if (nid >= 0 && nid != node)
1307                return false;
1308        return true;
1309}
1310
1311/* Only safe to use early in boot when initialisation is single-threaded */
1312static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1313{
1314        return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
1315}
1316
1317#else
1318
1319static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
1320{
1321        return true;
1322}
1323static inline bool __meminit  __maybe_unused
1324meminit_pfn_in_nid(unsigned long pfn, int node,
1325                   struct mminit_pfnnid_cache *state)
1326{
1327        return true;
1328}
1329#endif
1330
1331
1332void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
1333                                                        unsigned int order)
1334{
1335        if (early_page_uninitialised(pfn))
1336                return;
1337        return __free_pages_boot_core(page, order);
1338}
1339
1340/*
 1341 * Check that the whole (or a subset) of a pageblock, given by the interval
 1342 * [start_pfn, end_pfn), is valid and within the same zone, before scanning it
 1343 * with the migration or free compaction scanner. The scanners then need to
 1344 * use only the pfn_valid_within() check for arches that allow holes within
1345 * pageblocks.
1346 *
1347 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1348 *
1349 * It's possible on some configurations to have a setup like node0 node1 node0
 1350 * i.e. it's possible that all pages within a zone's range of pages do not
1351 * belong to a single zone. We assume that a border between node0 and node1
1352 * can occur within a single pageblock, but not a node0 node1 node0
1353 * interleaving within a single pageblock. It is therefore sufficient to check
1354 * the first and last page of a pageblock and avoid checking each individual
1355 * page in a pageblock.
1356 */
1357struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1358                                     unsigned long end_pfn, struct zone *zone)
1359{
1360        struct page *start_page;
1361        struct page *end_page;
1362
1363        /* end_pfn is one past the range we are checking */
1364        end_pfn--;
1365
1366        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1367                return NULL;
1368
1369        start_page = pfn_to_online_page(start_pfn);
1370        if (!start_page)
1371                return NULL;
1372
1373        if (page_zone(start_page) != zone)
1374                return NULL;
1375
1376        end_page = pfn_to_page(end_pfn);
1377
 1378        /* This gives shorter code than deriving page_zone(end_page) */
1379        if (page_zone_id(start_page) != page_zone_id(end_page))
1380                return NULL;
1381
1382        return start_page;
1383}
1384
1385void set_zone_contiguous(struct zone *zone)
1386{
1387        unsigned long block_start_pfn = zone->zone_start_pfn;
1388        unsigned long block_end_pfn;
1389
1390        block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1391        for (; block_start_pfn < zone_end_pfn(zone);
1392                        block_start_pfn = block_end_pfn,
1393                         block_end_pfn += pageblock_nr_pages) {
1394
1395                block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1396
1397                if (!__pageblock_pfn_to_page(block_start_pfn,
1398                                             block_end_pfn, zone))
1399                        return;
1400        }
1401
1402        /* We confirm that there is no hole */
1403        zone->contiguous = true;
1404}
1405
1406void clear_zone_contiguous(struct zone *zone)
1407{
1408        zone->contiguous = false;
1409}
1410
1411#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1412static void __init deferred_free_range(struct page *page,
1413                                        unsigned long pfn, int nr_pages)
1414{
1415        int i;
1416
1417        if (!page)
1418                return;
1419
1420        /* Free a large naturally-aligned chunk if possible */
1421        if (nr_pages == pageblock_nr_pages &&
1422            (pfn & (pageblock_nr_pages - 1)) == 0) {
1423                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1424                __free_pages_boot_core(page, pageblock_order);
1425                return;
1426        }
1427
1428        for (i = 0; i < nr_pages; i++, page++, pfn++) {
1429                if ((pfn & (pageblock_nr_pages - 1)) == 0)
1430                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1431                __free_pages_boot_core(page, 0);
1432        }
1433}
1434
1435/* Completion tracking for deferred_init_memmap() threads */
1436static atomic_t pgdat_init_n_undone __initdata;
1437static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1438
1439static inline void __init pgdat_init_report_one_done(void)
1440{
1441        if (atomic_dec_and_test(&pgdat_init_n_undone))
1442                complete(&pgdat_init_all_done_comp);
1443}
1444
1445/* Initialise remaining memory on a node */
1446static int __init deferred_init_memmap(void *data)
1447{
1448        pg_data_t *pgdat = data;
1449        int nid = pgdat->node_id;
1450        struct mminit_pfnnid_cache nid_init_state = { };
1451        unsigned long start = jiffies;
1452        unsigned long nr_pages = 0;
1453        unsigned long walk_start, walk_end;
1454        int i, zid;
1455        struct zone *zone;
1456        unsigned long first_init_pfn = pgdat->first_deferred_pfn;
1457        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1458
1459        if (first_init_pfn == ULONG_MAX) {
1460                pgdat_init_report_one_done();
1461                return 0;
1462        }
1463
1464        /* Bind memory initialisation thread to a local node if possible */
1465        if (!cpumask_empty(cpumask))
1466                set_cpus_allowed_ptr(current, cpumask);
1467
1468        /* Sanity check boundaries */
1469        BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1470        BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1471        pgdat->first_deferred_pfn = ULONG_MAX;
1472
1473        /* Only the highest zone is deferred so find it */
1474        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1475                zone = pgdat->node_zones + zid;
1476                if (first_init_pfn < zone_end_pfn(zone))
1477                        break;
1478        }
1479
1480        for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
1481                unsigned long pfn, end_pfn;
1482                struct page *page = NULL;
1483                struct page *free_base_page = NULL;
1484                unsigned long free_base_pfn = 0;
1485                int nr_to_free = 0;
1486
1487                end_pfn = min(walk_end, zone_end_pfn(zone));
1488                pfn = first_init_pfn;
1489                if (pfn < walk_start)
1490                        pfn = walk_start;
1491                if (pfn < zone->zone_start_pfn)
1492                        pfn = zone->zone_start_pfn;
1493
1494                for (; pfn < end_pfn; pfn++) {
1495                        if (!pfn_valid_within(pfn))
1496                                goto free_range;
1497
1498                        /*
1499                         * Ensure pfn_valid is checked every
1500                         * pageblock_nr_pages for memory holes
1501                         */
1502                        if ((pfn & (pageblock_nr_pages - 1)) == 0) {
1503                                if (!pfn_valid(pfn)) {
1504                                        page = NULL;
1505                                        goto free_range;
1506                                }
1507                        }
1508
1509                        if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
1510                                page = NULL;
1511                                goto free_range;
1512                        }
1513
1514                        /* Minimise pfn page lookups and scheduler checks */
1515                        if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
1516                                page++;
1517                        } else {
1518                                nr_pages += nr_to_free;
1519                                deferred_free_range(free_base_page,
1520                                                free_base_pfn, nr_to_free);
1521                                free_base_page = NULL;
1522                                free_base_pfn = nr_to_free = 0;
1523
1524                                page = pfn_to_page(pfn);
1525                                cond_resched();
1526                        }
1527
1528                        if (page->flags) {
1529                                VM_BUG_ON(page_zone(page) != zone);
1530                                goto free_range;
1531                        }
1532
1533                        __init_single_page(page, pfn, zid, nid);
1534                        if (!free_base_page) {
1535                                free_base_page = page;
1536                                free_base_pfn = pfn;
1537                                nr_to_free = 0;
1538                        }
1539                        nr_to_free++;
1540
1541                        /* Where possible, batch up pages for a single free */
1542                        continue;
1543free_range:
1544                        /* Free the current block of pages to allocator */
1545                        nr_pages += nr_to_free;
1546                        deferred_free_range(free_base_page, free_base_pfn,
1547                                                                nr_to_free);
1548                        free_base_page = NULL;
1549                        free_base_pfn = nr_to_free = 0;
1550                }
1551                /* Free the last block of pages to allocator */
1552                nr_pages += nr_to_free;
1553                deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
1554
1555                first_init_pfn = max(end_pfn, first_init_pfn);
1556        }
1557
1558        /* Sanity check that the next zone really is unpopulated */
1559        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1560
1561        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1562                                        jiffies_to_msecs(jiffies - start));
1563
1564        pgdat_init_report_one_done();
1565        return 0;
1566}
1567#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1568
1569void __init page_alloc_init_late(void)
1570{
1571        struct zone *zone;
1572
1573#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1574        int nid;
1575
1576        /* There will be num_node_state(N_MEMORY) threads */
1577        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1578        for_each_node_state(nid, N_MEMORY) {
1579                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1580        }
1581
1582        /* Block until all are initialised */
1583        wait_for_completion(&pgdat_init_all_done_comp);
1584
1585        /* Reinit limits that are based on free pages after the kernel is up */
1586        files_maxfiles_init();
1587#endif
1588#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1589        /* Discard memblock private memory */
1590        memblock_discard();
1591#endif
1592
1593        for_each_populated_zone(zone)
1594                set_zone_contiguous(zone);
1595}
1596
1597#ifdef CONFIG_CMA
1598/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1599void __init init_cma_reserved_pageblock(struct page *page)
1600{
1601        unsigned i = pageblock_nr_pages;
1602        struct page *p = page;
1603
1604        do {
1605                __ClearPageReserved(p);
1606                set_page_count(p, 0);
1607        } while (++p, --i);
1608
1609        set_pageblock_migratetype(page, MIGRATE_CMA);
1610
1611        if (pageblock_order >= MAX_ORDER) {
1612                i = pageblock_nr_pages;
1613                p = page;
1614                do {
1615                        set_page_refcounted(p);
1616                        __free_pages(p, MAX_ORDER - 1);
1617                        p += MAX_ORDER_NR_PAGES;
1618                } while (i -= MAX_ORDER_NR_PAGES);
1619        } else {
1620                set_page_refcounted(page);
1621                __free_pages(page, pageblock_order);
1622        }
1623
1624        adjust_managed_page_count(page, pageblock_nr_pages);
1625}
1626#endif
1627
1628/*
1629 * The order of subdivision here is critical for the IO subsystem.
1630 * Please do not alter this order without good reasons and regression
1631 * testing. Specifically, as large blocks of memory are subdivided,
1632 * the order in which smaller blocks are delivered depends on the order
1633 * they're subdivided in this function. This is the primary factor
1634 * influencing the order in which pages are delivered to the IO
1635 * subsystem according to empirical testing, and this is also justified
1636 * by considering the behavior of a buddy system containing a single
1637 * large block of memory acted on by a series of small allocations.
1638 * This behavior is a critical factor in sglist merging's success.
1639 *
1640 * -- nyc
1641 */
1642static inline void expand(struct zone *zone, struct page *page,
1643        int low, int high, struct free_area *area,
1644        int migratetype)
1645{
1646        unsigned long size = 1 << high;
1647
1648        while (high > low) {
1649                area--;
1650                high--;
1651                size >>= 1;
1652                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1653
1654                /*
1655                 * Mark as guard pages (or page) so that they can be
1656                 * merged back into the allocator when the buddy is freed.
1657                 * The corresponding page table entries will not be touched;
1658                 * the pages stay not present in the virtual address space.
1659                 */
1660                if (set_page_guard(zone, &page[size], high, migratetype))
1661                        continue;
1662
1663                list_add(&page[size].lru, &area->free_list[migratetype]);
1664                area->nr_free++;
1665                set_page_order(&page[size], high);
1666        }
1667}
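/*
 * Worked example (illustrative, not part of the kernel source): a caller asks
 * for an order-0 page but the smallest free block found is order 3 (8 pages).
 * expand() peels the upper half off at each step and returns it to the free
 * lists:
 *
 *	step 1: high 3->2, size 8->4: page[4..7] goes to the order-2 free list
 *	step 2: high 2->1, size 4->2: page[2..3] goes to the order-1 free list
 *	step 3: high 1->0, size 2->1: page[1]    goes to the order-0 free list
 *
 * page[0] stays allocated and is handed back by __rmqueue_smallest().
 */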
1668
1669static void check_new_page_bad(struct page *page)
1670{
1671        const char *bad_reason = NULL;
1672        unsigned long bad_flags = 0;
1673
1674        if (unlikely(atomic_read(&page->_mapcount) != -1))
1675                bad_reason = "nonzero mapcount";
1676        if (unlikely(page->mapping != NULL))
1677                bad_reason = "non-NULL mapping";
1678        if (unlikely(page_ref_count(page) != 0))
1679                bad_reason = "nonzero _count";
1680        if (unlikely(page->flags & __PG_HWPOISON)) {
1681                bad_reason = "HWPoisoned (hardware-corrupted)";
1682                bad_flags = __PG_HWPOISON;
1683                /* Don't complain about hwpoisoned pages */
1684                page_mapcount_reset(page); /* remove PageBuddy */
1685                return;
1686        }
1687        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1688                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1689                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1690        }
1691#ifdef CONFIG_MEMCG
1692        if (unlikely(page->mem_cgroup))
1693                bad_reason = "page still charged to cgroup";
1694#endif
1695        bad_page(page, bad_reason, bad_flags);
1696}
1697
1698/*
1699 * This page is about to be returned from the page allocator
1700 */
1701static inline int check_new_page(struct page *page)
1702{
1703        if (likely(page_expected_state(page,
1704                                PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1705                return 0;
1706
1707        check_new_page_bad(page);
1708        return 1;
1709}
1710
1711static inline bool free_pages_prezeroed(void)
1712{
1713        return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1714                page_poisoning_enabled();
1715}
1716
1717#ifdef CONFIG_DEBUG_VM
1718static bool check_pcp_refill(struct page *page)
1719{
1720        return false;
1721}
1722
1723static bool check_new_pcp(struct page *page)
1724{
1725        return check_new_page(page);
1726}
1727#else
1728static bool check_pcp_refill(struct page *page)
1729{
1730        return check_new_page(page);
1731}
1732static bool check_new_pcp(struct page *page)
1733{
1734        return false;
1735}
1736#endif /* CONFIG_DEBUG_VM */
1737
1738static bool check_new_pages(struct page *page, unsigned int order)
1739{
1740        int i;
1741        for (i = 0; i < (1 << order); i++) {
1742                struct page *p = page + i;
1743
1744                if (unlikely(check_new_page(p)))
1745                        return true;
1746        }
1747
1748        return false;
1749}
1750
1751inline void post_alloc_hook(struct page *page, unsigned int order,
1752                                gfp_t gfp_flags)
1753{
1754        set_page_private(page, 0);
1755        set_page_refcounted(page);
1756
1757        arch_alloc_page(page, order);
1758        kernel_map_pages(page, 1 << order, 1);
1759        kernel_poison_pages(page, 1 << order, 1);
1760        kasan_alloc_pages(page, order);
1761        set_page_owner(page, order, gfp_flags);
1762}
1763
1764static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1765                                                        unsigned int alloc_flags)
1766{
1767        int i;
1768
1769        post_alloc_hook(page, order, gfp_flags);
1770
1771        if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1772                for (i = 0; i < (1 << order); i++)
1773                        clear_highpage(page + i);
1774
1775        if (order && (gfp_flags & __GFP_COMP))
1776                prep_compound_page(page, order);
1777
1778        /*
1779         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1780         * allocate the page. The expectation is that the caller is taking
1781         * steps that will free more memory. The caller should avoid the page
1782         * being used for !PFMEMALLOC purposes.
1783         */
1784        if (alloc_flags & ALLOC_NO_WATERMARKS)
1785                set_page_pfmemalloc(page);
1786        else
1787                clear_page_pfmemalloc(page);
1788}
1789
1790/*
1791 * Go through the free lists for the given migratetype and remove
1792 * the smallest available page from the freelists
1793 */
1794static inline
1795struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1796                                                int migratetype)
1797{
1798        unsigned int current_order;
1799        struct free_area *area;
1800        struct page *page;
1801
1802        /* Find a page of the appropriate size in the preferred list */
1803        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1804                area = &(zone->free_area[current_order]);
1805                page = list_first_entry_or_null(&area->free_list[migratetype],
1806                                                        struct page, lru);
1807                if (!page)
1808                        continue;
1809                list_del(&page->lru);
1810                rmv_page_order(page);
1811                area->nr_free--;
1812                expand(zone, page, order, current_order, area, migratetype);
1813                set_pcppage_migratetype(page, migratetype);
1814                return page;
1815        }
1816
1817        return NULL;
1818}
1819
1820
1821/*
1822 * This array describes the order lists are fallen back to when
1823 * the free lists for the desirable migrate type are depleted
1824 */
1825static int fallbacks[MIGRATE_TYPES][4] = {
1826        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
1827        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
1828        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
1829#ifdef CONFIG_CMA
1830        [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
1831#endif
1832#ifdef CONFIG_MEMORY_ISOLATION
1833        [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
1834#endif
1835};
1836
1837#ifdef CONFIG_CMA
1838static struct page *__rmqueue_cma_fallback(struct zone *zone,
1839                                        unsigned int order)
1840{
1841        return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1842}
1843#else
1844static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1845                                        unsigned int order) { return NULL; }
1846#endif
1847
1848/*
1849 * Move the free pages in a range to the free lists of the requested type.
1850 * Note that start_page and end_page are not aligned on a pageblock
1851 * boundary. If alignment is required, use move_freepages_block()
1852 */
1853static int move_freepages(struct zone *zone,
1854                          struct page *start_page, struct page *end_page,
1855                          int migratetype, int *num_movable)
1856{
1857        struct page *page;
1858        unsigned int order;
1859        int pages_moved = 0;
1860
1861#ifndef CONFIG_HOLES_IN_ZONE
1862        /*
1863         * page_zone is not safe to call in this context when
1864         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1865         * anyway as we check zone boundaries in move_freepages_block().
1866         * Remove at a later date when no bug reports exist related to
1867         * grouping pages by mobility
1868         */
1869        VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1870#endif
1871
1872        if (num_movable)
1873                *num_movable = 0;
1874
1875        for (page = start_page; page <= end_page;) {
1876                if (!pfn_valid_within(page_to_pfn(page))) {
1877                        page++;
1878                        continue;
1879                }
1880
1881                /* Make sure we are not inadvertently changing nodes */
1882                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1883
1884                if (!PageBuddy(page)) {
1885                        /*
1886                         * We assume that pages that could be isolated for
1887                         * migration are movable. But we don't actually try
1888                         * isolating, as that would be expensive.
1889                         */
1890                        if (num_movable &&
1891                                        (PageLRU(page) || __PageMovable(page)))
1892                                (*num_movable)++;
1893
1894                        page++;
1895                        continue;
1896                }
1897
1898                order = page_order(page);
1899                list_move(&page->lru,
1900                          &zone->free_area[order].free_list[migratetype]);
1901                page += 1 << order;
1902                pages_moved += 1 << order;
1903        }
1904
1905        return pages_moved;
1906}
1907
1908int move_freepages_block(struct zone *zone, struct page *page,
1909                                int migratetype, int *num_movable)
1910{
1911        unsigned long start_pfn, end_pfn;
1912        struct page *start_page, *end_page;
1913
1914        start_pfn = page_to_pfn(page);
1915        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1916        start_page = pfn_to_page(start_pfn);
1917        end_page = start_page + pageblock_nr_pages - 1;
1918        end_pfn = start_pfn + pageblock_nr_pages - 1;
1919
1920        /* Do not cross zone boundaries */
1921        if (!zone_spans_pfn(zone, start_pfn))
1922                start_page = page;
1923        if (!zone_spans_pfn(zone, end_pfn))
1924                return 0;
1925
1926        return move_freepages(zone, start_page, end_page, migratetype,
1927                                                                num_movable);
1928}
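/*
 * Illustrative arithmetic (assumes the common pageblock_order of 9, i.e.
 * pageblock_nr_pages == 512, with 4KB pages): for a page at pfn 0x12345,
 *
 *	start_pfn = 0x12345 & ~(512 - 1) = 0x12200
 *	end_pfn   = 0x12200 + 512 - 1    = 0x123ff
 *
 * so the whole 2MB-aligned block containing the page is moved, unless one end
 * falls outside the zone, in which case the range is clipped to the original
 * page or the move is skipped entirely as above.
 */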
1929
1930static void change_pageblock_range(struct page *pageblock_page,
1931                                        int start_order, int migratetype)
1932{
1933        int nr_pageblocks = 1 << (start_order - pageblock_order);
1934
1935        while (nr_pageblocks--) {
1936                set_pageblock_migratetype(pageblock_page, migratetype);
1937                pageblock_page += pageblock_nr_pages;
1938        }
1939}
1940
1941/*
1942 * When we are falling back to another migratetype during allocation, try to
1943 * steal extra free pages from the same pageblocks to satisfy further
1944 * allocations, instead of polluting multiple pageblocks.
1945 *
1946 * If we are stealing a relatively large buddy page, it is likely there will
1947 * be more free pages in the pageblock, so try to steal them all. For
1948 * reclaimable and unmovable allocations, we steal regardless of page size,
1949 * as fragmentation caused by those allocations polluting movable pageblocks
1950 * is worse than movable allocations stealing from unmovable and reclaimable
1951 * pageblocks.
1952 */
1953static bool can_steal_fallback(unsigned int order, int start_mt)
1954{
1955        /*
1956         * This order check is intentionally kept even though the next
1957         * check uses a more relaxed order. The reason is that we can
1958         * steal the whole pageblock when this condition is met, while
1959         * the check below does not guarantee it and is just a heuristic
1960         * that could be changed at any time.
1961         */
1962        if (order >= pageblock_order)
1963                return true;
1964
1965        if (order >= pageblock_order / 2 ||
1966                start_mt == MIGRATE_RECLAIMABLE ||
1967                start_mt == MIGRATE_UNMOVABLE ||
1968                page_group_by_mobility_disabled)
1969                return true;
1970
1971        return false;
1972}
1973
1974/*
1975 * This function implements actual steal behaviour. If order is large enough,
1976 * we can steal whole pageblock. If not, we first move freepages in this
1977 * pageblock to our migratetype and determine how many already-allocated pages
1978 * are there in the pageblock with a compatible migratetype. If at least half
1979 * of pages are free or compatible, we can change migratetype of the pageblock
1980 * itself, so pages freed in the future will be put on the correct free list.
1981 */
1982static void steal_suitable_fallback(struct zone *zone, struct page *page,
1983                                        int start_type, bool whole_block)
1984{
1985        unsigned int current_order = page_order(page);
1986        struct free_area *area;
1987        int free_pages, movable_pages, alike_pages;
1988        int old_block_type;
1989
1990        old_block_type = get_pageblock_migratetype(page);
1991
1992        /*
1993         * This can happen due to races and we want to prevent broken
1994         * highatomic accounting.
1995         */
1996        if (is_migrate_highatomic(old_block_type))
1997                goto single_page;
1998
1999        /* Take ownership for orders >= pageblock_order */
2000        if (current_order >= pageblock_order) {
2001                change_pageblock_range(page, current_order, start_type);
2002                goto single_page;
2003        }
2004
2005        /* We are not allowed to try stealing from the whole block */
2006        if (!whole_block)
2007                goto single_page;
2008
2009        free_pages = move_freepages_block(zone, page, start_type,
2010                                                &movable_pages);
2011        /*
2012         * Determine how many pages are compatible with our allocation.
2013         * For movable allocation, it's the number of movable pages which
2014         * we just obtained. For other types it's a bit more tricky.
2015         */
2016        if (start_type == MIGRATE_MOVABLE) {
2017                alike_pages = movable_pages;
2018        } else {
2019                /*
2020                 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2021                 * to MOVABLE pageblock, consider all non-movable pages as
2022                 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2023                 * vice versa, be conservative since we can't distinguish the
2024                 * exact migratetype of non-movable pages.
2025                 */
2026                if (old_block_type == MIGRATE_MOVABLE)
2027                        alike_pages = pageblock_nr_pages
2028                                                - (free_pages + movable_pages);
2029                else
2030                        alike_pages = 0;
2031        }
2032
2033        /* moving whole block can fail due to zone boundary conditions */
2034        if (!free_pages)
2035                goto single_page;
2036
2037        /*
2038         * If a sufficient number of pages in the block are either free or of
2039         * comparable migratability as our allocation, claim the whole block.
2040         */
2041        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2042                        page_group_by_mobility_disabled)
2043                set_pageblock_migratetype(page, start_type);
2044
2045        return;
2046
2047single_page:
2048        area = &zone->free_area[current_order];
2049        list_move(&page->lru, &area->free_list[start_type]);
2050}
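/*
 * Worked example (illustrative): with pageblock_nr_pages == 512 the claim
 * threshold (1 << (pageblock_order - 1)) is 256.  If moving the block yields
 * free_pages == 200 and alike_pages == 80, then 200 + 80 >= 256 and the whole
 * pageblock is re-marked with start_type.  With only 150 + 50 == 200
 * compatible pages the free pages are still moved to start_type's lists, but
 * the pageblock keeps its old migratetype.
 */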
2051
2052/*
2053 * Check whether there is a suitable fallback freepage with requested order.
2054 * If only_stealable is true, this function returns fallback_mt only if
2055 * we can steal all the other free pages as well. This would help to reduce
2056 * fragmentation due to mixed migratetype pages in one pageblock.
2057 */
2058int find_suitable_fallback(struct free_area *area, unsigned int order,
2059                        int migratetype, bool only_stealable, bool *can_steal)
2060{
2061        int i;
2062        int fallback_mt;
2063
2064        if (area->nr_free == 0)
2065                return -1;
2066
2067        *can_steal = false;
2068        for (i = 0;; i++) {
2069                fallback_mt = fallbacks[migratetype][i];
2070                if (fallback_mt == MIGRATE_TYPES)
2071                        break;
2072
2073                if (list_empty(&area->free_list[fallback_mt]))
2074                        continue;
2075
2076                if (can_steal_fallback(order, migratetype))
2077                        *can_steal = true;
2078
2079                if (!only_stealable)
2080                        return fallback_mt;
2081
2082                if (*can_steal)
2083                        return fallback_mt;
2084        }
2085
2086        return -1;
2087}
2088
2089/*
2090 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2091 * there are no empty page blocks that contain a page with a suitable order
2092 */
2093static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2094                                unsigned int alloc_order)
2095{
2096        int mt;
2097        unsigned long max_managed, flags;
2098
2099        /*
2100         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2101         * Check is race-prone but harmless.
2102         */
2103        max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
2104        if (zone->nr_reserved_highatomic >= max_managed)
2105                return;
2106
2107        spin_lock_irqsave(&zone->lock, flags);
2108
2109        /* Recheck the nr_reserved_highatomic limit under the lock */
2110        if (zone->nr_reserved_highatomic >= max_managed)
2111                goto out_unlock;
2112
2113        /* Yoink! */
2114        mt = get_pageblock_migratetype(page);
2115        if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2116            && !is_migrate_cma(mt)) {
2117                zone->nr_reserved_highatomic += pageblock_nr_pages;
2118                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2119                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2120        }
2121
2122out_unlock:
2123        spin_unlock_irqrestore(&zone->lock, flags);
2124}
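/*
 * Illustrative arithmetic: for a zone with managed_pages == 1,000,000 and
 * pageblock_nr_pages == 512, max_managed is 1,000,000 / 100 + 512 == 10,512
 * pages, i.e. roughly 1% of the zone (about 20 pageblocks) may sit in the
 * MIGRATE_HIGHATOMIC reserve before this function becomes a no-op.
 */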
2125
2126/*
2127 * Used when an allocation is about to fail under memory pressure. This
2128 * potentially hurts the reliability of high-order allocations when under
2129 * intense memory pressure but failed atomic allocations should be easier
2130 * to recover from than an OOM.
2131 *
2132 * If @force is true, try to unreserve a pageblock even though highatomic
2133 * pageblock is exhausted.
2134 */
2135static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2136                                                bool force)
2137{
2138        struct zonelist *zonelist = ac->zonelist;
2139        unsigned long flags;
2140        struct zoneref *z;
2141        struct zone *zone;
2142        struct page *page;
2143        int order;
2144        bool ret;
2145
2146        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2147                                                                ac->nodemask) {
2148                /*
2149                 * Preserve at least one pageblock unless memory pressure
2150                 * is really high.
2151                 */
2152                if (!force && zone->nr_reserved_highatomic <=
2153                                        pageblock_nr_pages)
2154                        continue;
2155
2156                spin_lock_irqsave(&zone->lock, flags);
2157                for (order = 0; order < MAX_ORDER; order++) {
2158                        struct free_area *area = &(zone->free_area[order]);
2159
2160                        page = list_first_entry_or_null(
2161                                        &area->free_list[MIGRATE_HIGHATOMIC],
2162                                        struct page, lru);
2163                        if (!page)
2164                                continue;
2165
2166                        /*
2167                         * In the page freeing path, the migratetype change is racy,
2168                         * so we can encounter several free pages in a pageblock
2169                         * in this loop although we changed the pageblock type
2170                         * from highatomic to ac->migratetype. So we should
2171                         * adjust the count once.
2172                         */
2173                        if (is_migrate_highatomic_page(page)) {
2174                                /*
2175                                 * It should never happen but changes to
2176                                 * locking could inadvertently allow a per-cpu
2177                                 * drain to add pages to MIGRATE_HIGHATOMIC
2178                                 * while unreserving so be safe and watch for
2179                                 * underflows.
2180                                 */
2181                                zone->nr_reserved_highatomic -= min(
2182                                                pageblock_nr_pages,
2183                                                zone->nr_reserved_highatomic);
2184                        }
2185
2186                        /*
2187                         * Convert to ac->migratetype and avoid the normal
2188                         * pageblock stealing heuristics. Minimally, the caller
2189                         * is doing the work and needs the pages. More
2190                         * importantly, if the block was always converted to
2191                         * MIGRATE_UNMOVABLE or another type then the number
2192                         * of pageblocks that cannot be completely freed
2193                         * may increase.
2194                         */
2195                        set_pageblock_migratetype(page, ac->migratetype);
2196                        ret = move_freepages_block(zone, page, ac->migratetype,
2197                                                                        NULL);
2198                        if (ret) {
2199                                spin_unlock_irqrestore(&zone->lock, flags);
2200                                return ret;
2201                        }
2202                }
2203                spin_unlock_irqrestore(&zone->lock, flags);
2204        }
2205
2206        return false;
2207}
2208
2209/*
2210 * Try finding a free buddy page on the fallback list and put it on the free
2211 * list of requested migratetype, possibly along with other pages from the same
2212 * block, depending on fragmentation avoidance heuristics. Returns true if
2213 * fallback was found so that __rmqueue_smallest() can grab it.
2214 *
2215 * The use of signed ints for order and current_order is a deliberate
2216 * deviation from the rest of this file, to make the for loop
2217 * condition simpler.
2218 */
2219static inline bool
2220__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
2221{
2222        struct free_area *area;
2223        int current_order;
2224        struct page *page;
2225        int fallback_mt;
2226        bool can_steal;
2227
2228        /*
2229         * Find the largest available free page in the other list. This roughly
2230         * approximates finding the pageblock with the most free pages, which
2231         * would be too costly to do exactly.
2232         */
2233        for (current_order = MAX_ORDER - 1; current_order >= order;
2234                                --current_order) {
2235                area = &(zone->free_area[current_order]);
2236                fallback_mt = find_suitable_fallback(area, current_order,
2237                                start_migratetype, false, &can_steal);
2238                if (fallback_mt == -1)
2239                        continue;
2240
2241                /*
2242                 * We cannot steal all free pages from the pageblock and the
2243                 * requested migratetype is movable. In that case it's better to
2244                 * steal and split the smallest available page instead of the
2245                 * largest available page, because even if the next movable
2246                 * allocation falls back into a different pageblock than this
2247                 * one, it won't cause permanent fragmentation.
2248                 */
2249                if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2250                                        && current_order > order)
2251                        goto find_smallest;
2252
2253                goto do_steal;
2254        }
2255
2256        return false;
2257
2258find_smallest:
2259        for (current_order = order; current_order < MAX_ORDER;
2260                                                        current_order++) {
2261                area = &(zone->free_area[current_order]);
2262                fallback_mt = find_suitable_fallback(area, current_order,
2263                                start_migratetype, false, &can_steal);
2264                if (fallback_mt != -1)
2265                        break;
2266        }
2267
2268        /*
2269         * This should not happen - we already found a suitable fallback
2270         * when looking for the largest page.
2271         */
2272        VM_BUG_ON(current_order == MAX_ORDER);
2273
2274do_steal:
2275        page = list_first_entry(&area->free_list[fallback_mt],
2276                                                        struct page, lru);
2277
2278        steal_suitable_fallback(zone, page, start_migratetype, can_steal);
2279
2280        trace_mm_page_alloc_extfrag(page, order, current_order,
2281                start_migratetype, fallback_mt);
2282
2283        return true;
2284
2285}
2286
2287/*
2288 * Do the hard work of removing an element from the buddy allocator.
2289 * Call me with the zone->lock already held.
2290 */
2291static struct page *__rmqueue(struct zone *zone, unsigned int order,
2292                                int migratetype)
2293{
2294        struct page *page;
2295
2296retry:
2297        page = __rmqueue_smallest(zone, order, migratetype);
2298        if (unlikely(!page)) {
2299                if (migratetype == MIGRATE_MOVABLE)
2300                        page = __rmqueue_cma_fallback(zone, order);
2301
2302                if (!page && __rmqueue_fallback(zone, order, migratetype))
2303                        goto retry;
2304        }
2305
2306        trace_mm_page_alloc_zone_locked(page, order, migratetype);
2307        return page;
2308}
2309
2310/*
2311 * Obtain a specified number of elements from the buddy allocator, all under
2312 * a single hold of the lock, for efficiency.  Add them to the supplied list.
2313 * Returns the number of new pages which were placed at *list.
2314 */
2315static int rmqueue_bulk(struct zone *zone, unsigned int order,
2316                        unsigned long count, struct list_head *list,
2317                        int migratetype, bool cold)
2318{
2319        int i, alloced = 0;
2320
2321        spin_lock(&zone->lock);
2322        for (i = 0; i < count; ++i) {
2323                struct page *page = __rmqueue(zone, order, migratetype);
2324                if (unlikely(page == NULL))
2325                        break;
2326
2327                if (unlikely(check_pcp_refill(page)))
2328                        continue;
2329
2330                /*
2331                 * Split buddy pages returned by expand() are received here
2332                 * in physical page order. The page is added to the caller's
2333                 * list and the list head then moves forward. From the caller's
2334                 * perspective, the linked list is ordered by page number in
2335                 * some conditions. This is useful for IO devices that can
2336                 * merge IO requests if the physical pages are ordered
2337                 * properly.
2338                 */
2339                if (likely(!cold))
2340                        list_add(&page->lru, list);
2341                else
2342                        list_add_tail(&page->lru, list);
2343                list = &page->lru;
2344                alloced++;
2345                if (is_migrate_cma(get_pcppage_migratetype(page)))
2346                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2347                                              -(1 << order));
2348        }
2349
2350        /*
2351         * i pages were removed from the buddy list even if some leak due
2352         * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2353         * on i. Do not confuse with 'alloced' which is the number of
2354         * pages added to the pcp list.
2355         */
2356        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2357        spin_unlock(&zone->lock);
2358        return alloced;
2359}
2360
2361#ifdef CONFIG_NUMA
2362/*
2363 * Called from the vmstat counter updater to drain pagesets of this
2364 * currently executing processor on remote nodes after they have
2365 * expired.
2366 *
2367 * Note that this function must be called with the thread pinned to
2368 * a single processor.
2369 */
2370void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2371{
2372        unsigned long flags;
2373        int to_drain, batch;
2374
2375        local_irq_save(flags);
2376        batch = READ_ONCE(pcp->batch);
2377        to_drain = min(pcp->count, batch);
2378        if (to_drain > 0) {
2379                free_pcppages_bulk(zone, to_drain, pcp);
2380                pcp->count -= to_drain;
2381        }
2382        local_irq_restore(flags);
2383}
2384#endif
2385
2386/*
2387 * Drain pcplists of the indicated processor and zone.
2388 *
2389 * The processor must either be the current processor and the
2390 * thread pinned to the current processor or a processor that
2391 * is not online.
2392 */
2393static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2394{
2395        unsigned long flags;
2396        struct per_cpu_pageset *pset;
2397        struct per_cpu_pages *pcp;
2398
2399        local_irq_save(flags);
2400        pset = per_cpu_ptr(zone->pageset, cpu);
2401
2402        pcp = &pset->pcp;
2403        if (pcp->count) {
2404                free_pcppages_bulk(zone, pcp->count, pcp);
2405                pcp->count = 0;
2406        }
2407        local_irq_restore(flags);
2408}
2409
2410/*
2411 * Drain pcplists of all zones on the indicated processor.
2412 *
2413 * The processor must either be the current processor and the
2414 * thread pinned to the current processor or a processor that
2415 * is not online.
2416 */
2417static void drain_pages(unsigned int cpu)
2418{
2419        struct zone *zone;
2420
2421        for_each_populated_zone(zone) {
2422                drain_pages_zone(cpu, zone);
2423        }
2424}
2425
2426/*
2427 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2428 *
2429 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2430 * the single zone's pages.
2431 */
2432void drain_local_pages(struct zone *zone)
2433{
2434        int cpu = smp_processor_id();
2435
2436        if (zone)
2437                drain_pages_zone(cpu, zone);
2438        else
2439                drain_pages(cpu);
2440}
2441
2442static void drain_local_pages_wq(struct work_struct *work)
2443{
2444        /*
2445         * drain_all_pages doesn't use proper cpu hotplug protection so
2446         * we can race with cpu offline when the WQ can move this from
2447         * a cpu pinned worker to an unbound one. We can operate on a different
2448         * cpu, which is all right, but we also have to make sure not to move to
2449         * a different one.
2450         */
2451        preempt_disable();
2452        drain_local_pages(NULL);
2453        preempt_enable();
2454}
2455
2456/*
2457 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2458 *
2459 * When zone parameter is non-NULL, spill just the single zone's pages.
2460 *
2461 * Note that this can be extremely slow as the draining happens in a workqueue.
2462 */
2463void drain_all_pages(struct zone *zone)
2464{
2465        int cpu;
2466
2467        /*
2468         * Allocate in the BSS so we won't require allocation in
2469         * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2470         */
2471        static cpumask_t cpus_with_pcps;
2472
2473        /*
2474         * Make sure nobody triggers this path before mm_percpu_wq is fully
2475         * initialized.
2476         */
2477        if (WARN_ON_ONCE(!mm_percpu_wq))
2478                return;
2479
2480        /* Workqueues cannot recurse */
2481        if (current->flags & PF_WQ_WORKER)
2482                return;
2483
2484        /*
2485         * Do not drain if one is already in progress unless it's specific to
2486         * a zone. Such callers are primarily CMA and memory hotplug and need
2487         * the drain to be complete when the call returns.
2488         */
2489        if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2490                if (!zone)
2491                        return;
2492                mutex_lock(&pcpu_drain_mutex);
2493        }
2494
2495        /*
2496         * We don't care about racing with CPU hotplug events,
2497         * as the offline notification will cause the notified
2498         * cpu to drain its pcps, and on_each_cpu_mask
2499         * disables preemption as part of its processing.
2500         */
2501        for_each_online_cpu(cpu) {
2502                struct per_cpu_pageset *pcp;
2503                struct zone *z;
2504                bool has_pcps = false;
2505
2506                if (zone) {
2507                        pcp = per_cpu_ptr(zone->pageset, cpu);
2508                        if (pcp->pcp.count)
2509                                has_pcps = true;
2510                } else {
2511                        for_each_populated_zone(z) {
2512                                pcp = per_cpu_ptr(z->pageset, cpu);
2513                                if (pcp->pcp.count) {
2514                                        has_pcps = true;
2515                                        break;
2516                                }
2517                        }
2518                }
2519
2520                if (has_pcps)
2521                        cpumask_set_cpu(cpu, &cpus_with_pcps);
2522                else
2523                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
2524        }
2525
2526        for_each_cpu(cpu, &cpus_with_pcps) {
2527                struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
2528                INIT_WORK(work, drain_local_pages_wq);
2529                queue_work_on(cpu, mm_percpu_wq, work);
2530        }
2531        for_each_cpu(cpu, &cpus_with_pcps)
2532                flush_work(per_cpu_ptr(&pcpu_drain, cpu));
2533
2534        mutex_unlock(&pcpu_drain_mutex);
2535}
2536
2537#ifdef CONFIG_HIBERNATION
2538
2539/*
2540 * Touch the watchdog for every WD_PAGE_COUNT pages.
2541 */
2542#define WD_PAGE_COUNT   (128*1024)
2543
2544void mark_free_pages(struct zone *zone)
2545{
2546        unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2547        unsigned long flags;
2548        unsigned int order, t;
2549        struct page *page;
2550
2551        if (zone_is_empty(zone))
2552                return;
2553
2554        spin_lock_irqsave(&zone->lock, flags);
2555
2556        max_zone_pfn = zone_end_pfn(zone);
2557        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2558                if (pfn_valid(pfn)) {
2559                        page = pfn_to_page(pfn);
2560
2561                        if (!--page_count) {
2562                                touch_nmi_watchdog();
2563                                page_count = WD_PAGE_COUNT;
2564                        }
2565
2566                        if (page_zone(page) != zone)
2567                                continue;
2568
2569                        if (!swsusp_page_is_forbidden(page))
2570                                swsusp_unset_page_free(page);
2571                }
2572
2573        for_each_migratetype_order(order, t) {
2574                list_for_each_entry(page,
2575                                &zone->free_area[order].free_list[t], lru) {
2576                        unsigned long i;
2577
2578                        pfn = page_to_pfn(page);
2579                        for (i = 0; i < (1UL << order); i++) {
2580                                if (!--page_count) {
2581                                        touch_nmi_watchdog();
2582                                        page_count = WD_PAGE_COUNT;
2583                                }
2584                                swsusp_set_page_free(pfn_to_page(pfn + i));
2585                        }
2586                }
2587        }
2588        spin_unlock_irqrestore(&zone->lock, flags);
2589}
2590#endif /* CONFIG_HIBERNATION */
2591
2592/*
2593 * Free a 0-order page
2594 * cold == true ? free a cold page : free a hot page
2595 */
2596void free_hot_cold_page(struct page *page, bool cold)
2597{
2598        struct zone *zone = page_zone(page);
2599        struct per_cpu_pages *pcp;
2600        unsigned long flags;
2601        unsigned long pfn = page_to_pfn(page);
2602        int migratetype;
2603
2604        if (!free_pcp_prepare(page))
2605                return;
2606
2607        migratetype = get_pfnblock_migratetype(page, pfn);
2608        set_pcppage_migratetype(page, migratetype);
2609        local_irq_save(flags);
2610        __count_vm_event(PGFREE);
2611
2612        /*
2613         * We only track unmovable, reclaimable and movable on pcp lists.
2614         * Free ISOLATE pages back to the allocator because they are being
2615         * offlined but treat HIGHATOMIC as movable pages so we can get those
2616         * areas back if necessary. Otherwise, we may have to free
2617         * excessively into the page allocator
2618         */
2619        if (migratetype >= MIGRATE_PCPTYPES) {
2620                if (unlikely(is_migrate_isolate(migratetype))) {
2621                        free_one_page(zone, page, pfn, 0, migratetype);
2622                        goto out;
2623                }
2624                migratetype = MIGRATE_MOVABLE;
2625        }
2626
2627        pcp = &this_cpu_ptr(zone->pageset)->pcp;
2628        if (!cold)
2629                list_add(&page->lru, &pcp->lists[migratetype]);
2630        else
2631                list_add_tail(&page->lru, &pcp->lists[migratetype]);
2632        pcp->count++;
2633        if (pcp->count >= pcp->high) {
2634                unsigned long batch = READ_ONCE(pcp->batch);
2635                free_pcppages_bulk(zone, batch, pcp);
2636                pcp->count -= batch;
2637        }
2638
2639out:
2640        local_irq_restore(flags);
2641}
2642
2643/*
2644 * Free a list of 0-order pages
2645 */
2646void free_hot_cold_page_list(struct list_head *list, bool cold)
2647{
2648        struct page *page, *next;
2649
2650        list_for_each_entry_safe(page, next, list, lru) {
2651                trace_mm_page_free_batched(page, cold);
2652                free_hot_cold_page(page, cold);
2653        }
2654}
2655
2656/*
2657 * split_page takes a non-compound higher-order page, and splits it into
2658 * n (1<<order) sub-pages: page[0..n-1]
2659 * Each sub-page must be freed individually.
2660 *
2661 * Note: this is probably too low level an operation for use in drivers.
2662 * Please consult with lkml before using this in your driver.
2663 */
2664void split_page(struct page *page, unsigned int order)
2665{
2666        int i;
2667
2668        VM_BUG_ON_PAGE(PageCompound(page), page);
2669        VM_BUG_ON_PAGE(!page_count(page), page);
2670
2671#ifdef CONFIG_KMEMCHECK
2672        /*
2673         * Split shadow pages too, because free(page[0]) would
2674         * otherwise free the whole shadow.
2675         */
2676        if (kmemcheck_page_is_tracked(page))
2677                split_page(virt_to_page(page[0].shadow), order);
2678#endif
2679
2680        for (i = 1; i < (1 << order); i++)
2681                set_page_refcounted(page + i);
2682        split_page_owner(page, order);
2683}
2684EXPORT_SYMBOL_GPL(split_page);
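/*
 * Usage sketch (illustrative only, not from this file): a caller that needs
 * physically contiguous pages but wants to release the tail pages
 * individually might do:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// four contiguous pages
 *	if (page) {
 *		split_page(page, 2);		// now four independent order-0 pages
 *		__free_page(page + 3);		// tail pages can be freed one by one
 *		__free_page(page + 2);		// keeping page[0..1] for later use
 *	}
 *
 * As noted above, consult lkml before relying on this from a driver.
 */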
2685
2686int __isolate_free_page(struct page *page, unsigned int order)
2687{
2688        unsigned long watermark;
2689        struct zone *zone;
2690        int mt;
2691
2692        BUG_ON(!PageBuddy(page));
2693
2694        zone = page_zone(page);
2695        mt = get_pageblock_migratetype(page);
2696
2697        if (!is_migrate_isolate(mt)) {
2698                /*
2699                 * Obey watermarks as if the page was being allocated. We can
2700                 * emulate a high-order watermark check with a raised order-0
2701                 * watermark, because we already know our high-order page
2702                 * exists.
2703                 */
2704                watermark = min_wmark_pages(zone) + (1UL << order);
2705                if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2706                        return 0;
2707
2708                __mod_zone_freepage_state(zone, -(1UL << order), mt);
2709        }
2710
2711        /* Remove page from free list */
2712        list_del(&page->lru);
2713        zone->free_area[order].nr_free--;
2714        rmv_page_order(page);
2715
2716        /*
2717         * Set the pageblock migratetype if the isolated page is at least
2718         * half of a pageblock
2719         */
2720        if (order >= pageblock_order - 1) {
2721                struct page *endpage = page + (1 << order) - 1;
2722                for (; page < endpage; page += pageblock_nr_pages) {
2723                        int mt = get_pageblock_migratetype(page);
2724                        if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2725                            && !is_migrate_highatomic(mt))
2726                                set_pageblock_migratetype(page,
2727                                                          MIGRATE_MOVABLE);
2728                }
2729        }
2730
2731
2732        return 1UL << order;
2733}
2734
2735/*
2736 * Update NUMA hit/miss statistics
2737 *
2738 * Must be called with interrupts disabled.
2739 */
2740static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2741{
2742#ifdef CONFIG_NUMA
2743        enum zone_stat_item local_stat = NUMA_LOCAL;
2744
2745        if (z->node != numa_node_id())
2746                local_stat = NUMA_OTHER;
2747
2748        if (z->node == preferred_zone->node)
2749                __inc_zone_state(z, NUMA_HIT);
2750        else {
2751                __inc_zone_state(z, NUMA_MISS);
2752                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2753        }
2754        __inc_zone_state(z, local_stat);
2755#endif
2756}
2757
2758/* Remove page from the per-cpu list, caller must protect the list */
2759static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
2760                        bool cold, struct per_cpu_pages *pcp,
2761                        struct list_head *list)
2762{
2763        struct page *page;
2764
2765        do {
2766                if (list_empty(list)) {
2767                        pcp->count += rmqueue_bulk(zone, 0,
2768                                        pcp->batch, list,
2769                                        migratetype, cold);
2770                        if (unlikely(list_empty(list)))
2771                                return NULL;
2772                }
2773
2774                if (cold)
2775                        page = list_last_entry(list, struct page, lru);
2776                else
2777                        page = list_first_entry(list, struct page, lru);
2778
2779                list_del(&page->lru);
2780                pcp->count--;
2781        } while (check_new_pcp(page));
2782
2783        return page;
2784}
2785
2786/* Lock and remove page from the per-cpu list */
2787static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2788                        struct zone *zone, unsigned int order,
2789                        gfp_t gfp_flags, int migratetype)
2790{
2791        struct per_cpu_pages *pcp;
2792        struct list_head *list;
2793        bool cold = ((gfp_flags & __GFP_COLD) != 0);
2794        struct page *page;
2795        unsigned long flags;
2796
2797        local_irq_save(flags);
2798        pcp = &this_cpu_ptr(zone->pageset)->pcp;
2799        list = &pcp->lists[migratetype];
2800        page = __rmqueue_pcplist(zone,  migratetype, cold, pcp, list);
2801        if (page) {
2802                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2803                zone_statistics(preferred_zone, zone);
2804        }
2805        local_irq_restore(flags);
2806        return page;
2807}
2808
2809/*
2810 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2811 */
2812static inline
2813struct page *rmqueue(struct zone *preferred_zone,
2814                        struct zone *zone, unsigned int order,
2815                        gfp_t gfp_flags, unsigned int alloc_flags,
2816                        int migratetype)
2817{
2818        unsigned long flags;
2819        struct page *page;
2820
2821        if (likely(order == 0)) {
2822                page = rmqueue_pcplist(preferred_zone, zone, order,
2823                                gfp_flags, migratetype);
2824                goto out;
2825        }
2826
2827        /*
2828         * We most definitely don't want callers attempting to
2829         * allocate greater than order-1 page units with __GFP_NOFAIL.
2830         */
2831        WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2832        spin_lock_irqsave(&zone->lock, flags);
2833
2834        do {
2835                page = NULL;
2836                if (alloc_flags & ALLOC_HARDER) {
2837                        page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2838                        if (page)
2839                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
2840                }
2841                if (!page)
2842                        page = __rmqueue(zone, order, migratetype);
2843        } while (page && check_new_pages(page, order));
2844        spin_unlock(&zone->lock);
2845        if (!page)
2846                goto failed;
2847        __mod_zone_freepage_state(zone, -(1 << order),
2848                                  get_pcppage_migratetype(page));
2849
2850        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2851        zone_statistics(preferred_zone, zone);
2852        local_irq_restore(flags);
2853
2854out:
2855        VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2856        return page;
2857
2858failed:
2859        local_irq_restore(flags);
2860        return NULL;
2861}
2862
2863#ifdef CONFIG_FAIL_PAGE_ALLOC
2864
2865static struct {
2866        struct fault_attr attr;
2867
2868        bool ignore_gfp_highmem;
2869        bool ignore_gfp_reclaim;
2870        u32 min_order;
2871} fail_page_alloc = {
2872        .attr = FAULT_ATTR_INITIALIZER,
2873        .ignore_gfp_reclaim = true,
2874        .ignore_gfp_highmem = true,
2875        .min_order = 1,
2876};
2877
2878static int __init setup_fail_page_alloc(char *str)
2879{
2880        return setup_fault_attr(&fail_page_alloc.attr, str);
2881}
2882__setup("fail_page_alloc=", setup_fail_page_alloc);
2883
2884static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2885{
2886        if (order < fail_page_alloc.min_order)
2887                return false;
2888        if (gfp_mask & __GFP_NOFAIL)
2889                return false;
2890        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
2891                return false;
2892        if (fail_page_alloc.ignore_gfp_reclaim &&
2893                        (gfp_mask & __GFP_DIRECT_RECLAIM))
2894                return false;
2895
2896        return should_fail(&fail_page_alloc.attr, 1 << order);
2897}
2898
2899#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
2900
2901static int __init fail_page_alloc_debugfs(void)
2902{
2903        umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
2904        struct dentry *dir;
2905
2906        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
2907                                        &fail_page_alloc.attr);
2908        if (IS_ERR(dir))
2909                return PTR_ERR(dir);
2910
2911        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
2912                                &fail_page_alloc.ignore_gfp_reclaim))
2913                goto fail;
2914        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
2915                                &fail_page_alloc.ignore_gfp_highmem))
2916                goto fail;
2917        if (!debugfs_create_u32("min-order", mode, dir,
2918                                &fail_page_alloc.min_order))
2919                goto fail;
2920
2921        return 0;
2922fail:
2923        debugfs_remove_recursive(dir);
2924
2925        return -ENOMEM;
2926}
2927
2928late_initcall(fail_page_alloc_debugfs);
2929
2930#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
2931
2932#else /* CONFIG_FAIL_PAGE_ALLOC */
2933
2934static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2935{
2936        return false;
2937}
2938
2939#endif /* CONFIG_FAIL_PAGE_ALLOC */
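/*
 * Example usage (illustrative; see Documentation/fault-injection/): page
 * allocation failures can be requested at boot with
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * or tuned at run time via the debugfs attributes created above (assuming
 * debugfs is mounted at /sys/kernel/debug), e.g.
 *
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo -1 > /sys/kernel/debug/fail_page_alloc/times
 *	echo 0  > /sys/kernel/debug/fail_page_alloc/min-order
 */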
2940
2941/*
2942 * Return true if free base pages are above 'mark'. For high-order checks it
2943 * will return true if the order-0 watermark is reached and there is at least
2944 * one free page of a suitable size. Checking now avoids taking the zone lock
2945 * to check in the allocation paths if no pages are free.
2946 */
2947bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2948                         int classzone_idx, unsigned int alloc_flags,
2949                         long free_pages)
2950{
2951        long min = mark;
2952        int o;
2953        const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
2954
2955        /* free_pages may go negative - that's OK */
2956        free_pages -= (1 << order) - 1;
2957
2958        if (alloc_flags & ALLOC_HIGH)
2959                min -= min / 2;
2960
2961        /*
2962         * If the caller does not have rights to ALLOC_HARDER then subtract
2963         * the high-atomic reserves. This will over-estimate the size of the
2964         * atomic reserve but it avoids a search.
2965         */
2966        if (likely(!alloc_harder))
2967                free_pages -= z->nr_reserved_highatomic;
2968        else
2969                min -= min / 4;
2970
2971#ifdef CONFIG_CMA
2972        /* If allocation can't use CMA areas don't use free CMA pages */
2973        if (!(alloc_flags & ALLOC_CMA))
2974                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
2975#endif
2976
2977        /*
2978         * Check watermarks for an order-0 allocation request. If these
2979         * are not met, then a high-order request also cannot go ahead
2980         * even if a suitable page happened to be free.
2981         */
2982        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
2983                return false;
2984
2985        /* If this is an order-0 request then the watermark is fine */
2986        if (!order)
2987                return true;
2988
2989        /* For a high-order request, check at least one suitable page is free */
2990        for (o = order; o < MAX_ORDER; o++) {
2991                struct free_area *area = &z->free_area[o];
2992                int mt;
2993
2994                if (!area->nr_free)
2995                        continue;
2996
2997                if (alloc_harder)
2998                        return true;
2999
3000                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3001                        if (!list_empty(&area->free_list[mt]))
3002                                return true;
3003                }
3004
3005#ifdef CONFIG_CMA
3006                if ((alloc_flags & ALLOC_CMA) &&
3007                    !list_empty(&area->free_list[MIGRATE_CMA])) {
3008                        return true;
3009                }
3010#endif
3011        }
3012        return false;
3013}
3014
3015bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3016                      int classzone_idx, unsigned int alloc_flags)
3017{
3018        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3019                                        zone_page_state(z, NR_FREE_PAGES));
3020}
3021
3022static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3023                unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3024{
3025        long free_pages = zone_page_state(z, NR_FREE_PAGES);
3026        long cma_pages = 0;
3027
3028#ifdef CONFIG_CMA
3029        /* If allocation can't use CMA areas don't use free CMA pages */
3030        if (!(alloc_flags & ALLOC_CMA))
3031                cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3032#endif
3033
3034        /*
3035         * Fast check for order-0 only. If this fails then the reserves
3036         * need to be calculated. There is a corner case where the check
3037         * passes but only the high-order atomic reserves are free. If
3038         * the caller is !atomic then it'll uselessly search the free
3039         * list. That corner case is then slower but it is harmless.
3040         */
3041        if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3042                return true;
3043
3044        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3045                                        free_pages);
3046}
3047
3048bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3049                        unsigned long mark, int classzone_idx)
3050{
3051        long free_pages = zone_page_state(z, NR_FREE_PAGES);
3052
3053        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3054                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3055
3056        return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3057                                                                free_pages);
3058}
3059
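/*
 * Illustrative sketch (hypothetical helper, not a caller in this file):
 * checking whether an order-3 request could proceed against a zone's min
 * watermark, much like the reclaim/compaction retry logic further below
 * uses these helpers.
 */
static inline bool example_order3_above_min_wmark(struct zone *zone)
{
        /* No ALLOC_HIGH/ALLOC_HARDER/ALLOC_CMA boosts for this check. */
        return zone_watermark_ok(zone, 3, min_wmark_pages(zone),
                                 zone_idx(zone), 0);
}
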
3060#ifdef CONFIG_NUMA
3061static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3062{
3063        return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3064                                RECLAIM_DISTANCE;
3065}
3066#else   /* CONFIG_NUMA */
3067static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3068{
3069        return true;
3070}
3071#endif  /* CONFIG_NUMA */
3072
3073/*
3074 * get_page_from_freelist goes through the zonelist trying to allocate
3075 * a page.
3076 */
3077static struct page *
3078get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3079                                                const struct alloc_context *ac)
3080{
3081        struct zoneref *z = ac->preferred_zoneref;
3082        struct zone *zone;
3083        struct pglist_data *last_pgdat_dirty_limit = NULL;
3084
3085        /*
3086         * Scan zonelist, looking for a zone with enough free.
3087         * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3088         */
3089        for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3090                                                                ac->nodemask) {
3091                struct page *page;
3092                unsigned long mark;
3093
3094                if (cpusets_enabled() &&
3095                        (alloc_flags & ALLOC_CPUSET) &&
3096                        !__cpuset_zone_allowed(zone, gfp_mask))
3097                                continue;
3098                /*
3099                 * When allocating a page cache page for writing, we
3100                 * want to get it from a node that is within its dirty
3101                 * limit, such that no single node holds more than its
3102                 * proportional share of globally allowed dirty pages.
3103                 * The dirty limits take into account the node's
3104                 * lowmem reserves and high watermark so that kswapd
3105                 * should be able to balance it without having to
3106                 * write pages from its LRU list.
3107                 *
3108                 * XXX: For now, allow allocations to potentially
3109                 * exceed the per-node dirty limit in the slowpath
3110                 * (spread_dirty_pages unset) before going into reclaim,
3111                 * which is important when on a NUMA setup the allowed
3112                 * nodes are together not big enough to reach the
3113                 * global limit.  The proper fix for these situations
3114                 * will require awareness of nodes in the
3115                 * dirty-throttling and the flusher threads.
3116                 */
3117                if (ac->spread_dirty_pages) {
3118                        if (last_pgdat_dirty_limit == zone->zone_pgdat)
3119                                continue;
3120
3121                        if (!node_dirty_ok(zone->zone_pgdat)) {
3122                                last_pgdat_dirty_limit = zone->zone_pgdat;
3123                                continue;
3124                        }
3125                }
3126
3127                mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
3128                if (!zone_watermark_fast(zone, order, mark,
3129                                       ac_classzone_idx(ac), alloc_flags)) {
3130                        int ret;
3131
3132                        /* Checked here to keep the fast path fast */
3133                        BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3134                        if (alloc_flags & ALLOC_NO_WATERMARKS)
3135                                goto try_this_zone;
3136
3137                        if (node_reclaim_mode == 0 ||
3138                            !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3139                                continue;
3140
3141                        ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3142                        switch (ret) {
3143                        case NODE_RECLAIM_NOSCAN:
3144                                /* did not scan */
3145                                continue;
3146                        case NODE_RECLAIM_FULL:
3147                                /* scanned but unreclaimable */
3148                                continue;
3149                        default:
3150                                /* did we reclaim enough */
3151                                if (zone_watermark_ok(zone, order, mark,
3152                                                ac_classzone_idx(ac), alloc_flags))
3153                                        goto try_this_zone;
3154
3155                                continue;
3156                        }
3157                }
3158
3159try_this_zone:
3160                page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3161                                gfp_mask, alloc_flags, ac->migratetype);
3162                if (page) {
3163                        prep_new_page(page, order, gfp_mask, alloc_flags);
3164
3165                        /*
3166                         * If this is a high-order atomic allocation then check
3167                         * if the pageblock should be reserved for the future
3168                         */
3169                        if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3170                                reserve_highatomic_pageblock(page, zone, order);
3171
3172                        return page;
3173                }
3174        }
3175
3176        return NULL;
3177}
3178
3179/*
3180 * Large machines with many possible nodes should not always dump per-node
3181 * meminfo in irq context.
3182 */
3183static inline bool should_suppress_show_mem(void)
3184{
3185        bool ret = false;
3186
3187#if NODES_SHIFT > 8
3188        ret = in_interrupt();
3189#endif
3190        return ret;
3191}
3192
3193static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3194{
3195        unsigned int filter = SHOW_MEM_FILTER_NODES;
3196        static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3197
3198        if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
3199                return;
3200
3201        /*
3202         * This documents exceptions given to allocations in certain
3203         * contexts that are allowed to allocate outside current's set
3204         * of allowed nodes.
3205         */
3206        if (!(gfp_mask & __GFP_NOMEMALLOC))
3207                if (test_thread_flag(TIF_MEMDIE) ||
3208                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3209                        filter &= ~SHOW_MEM_FILTER_NODES;
3210        if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3211                filter &= ~SHOW_MEM_FILTER_NODES;
3212
3213        show_mem(filter, nodemask);
3214}
3215
3216void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3217{
3218        struct va_format vaf;
3219        va_list args;
3220        static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3221                                      DEFAULT_RATELIMIT_BURST);
3222
3223        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3224                return;
3225
3226        pr_warn("%s: ", current->comm);
3227
3228        va_start(args, fmt);
3229        vaf.fmt = fmt;
3230        vaf.va = &args;
3231        pr_cont("%pV", &vaf);
3232        va_end(args);
3233
3234        pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
3235        if (nodemask)
3236                pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
3237        else
3238                pr_cont("(null)\n");
3239
3240        cpuset_print_current_mems_allowed();
3241
3242        dump_stack();
3243        warn_alloc_show_mem(gfp_mask, nodemask);
3244}
3245
3246static inline struct page *
3247__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3248                              unsigned int alloc_flags,
3249                              const struct alloc_context *ac)
3250{
3251        struct page *page;
3252
3253        page = get_page_from_freelist(gfp_mask, order,
3254                        alloc_flags|ALLOC_CPUSET, ac);
3255        /*
3256         * fallback to ignore cpuset restriction if our nodes
3257         * are depleted
3258         */
3259        if (!page)
3260                page = get_page_from_freelist(gfp_mask, order,
3261                                alloc_flags, ac);
3262
3263        return page;
3264}
3265
3266static inline struct page *
3267__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3268        const struct alloc_context *ac, unsigned long *did_some_progress)
3269{
3270        struct oom_control oc = {
3271                .zonelist = ac->zonelist,
3272                .nodemask = ac->nodemask,
3273                .memcg = NULL,
3274                .gfp_mask = gfp_mask,
3275                .order = order,
3276        };
3277        struct page *page;
3278
3279        *did_some_progress = 0;
3280
3281        /*
3282         * Acquire the oom lock.  If that fails, somebody else is
3283         * making progress for us.
3284         */
3285        if (!mutex_trylock(&oom_lock)) {
3286                *did_some_progress = 1;
3287                schedule_timeout_uninterruptible(1);
3288                return NULL;
3289        }
3290
3291        /*
3292         * Go through the zonelist one more time, keeping a very high watermark
3293         * here; this is only to catch a parallel OOM kill, and we must fail if
3294         * we're still under heavy pressure. Also make sure this reclaim attempt
3295         * does not itself depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3296         * allocation, which would never fail because oom_lock is already held.
3297         */
3298        page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3299                                      ~__GFP_DIRECT_RECLAIM, order,
3300                                      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3301        if (page)
3302                goto out;
3303
3304        /* Coredumps can quickly deplete all memory reserves */
3305        if (current->flags & PF_DUMPCORE)
3306                goto out;
3307        /* The OOM killer will not help higher order allocs */
3308        if (order > PAGE_ALLOC_COSTLY_ORDER)
3309                goto out;
3310        /*
3311         * We have already exhausted all our reclaim opportunities without any
3312         * success so it is time to admit defeat. We will skip the OOM killer
3313         * because it is very likely that the caller has a more reasonable
3314         * fallback than shooting a random task.
3315         */
3316        if (gfp_mask & __GFP_RETRY_MAYFAIL)
3317                goto out;
3318        /* The OOM killer does not needlessly kill tasks for lowmem */
3319        if (ac->high_zoneidx < ZONE_NORMAL)
3320                goto out;
3321        if (pm_suspended_storage())
3322                goto out;
3323        /*
3324         * XXX: GFP_NOFS allocations should rather fail than rely on
3325         * other requests to make forward progress.
3326         * We are in an unfortunate situation where out_of_memory cannot
3327         * do much for this context but let's try it to at least get
3328         * access to memory reserves if the current task is killed (see
3329         * out_of_memory). Once filesystems are ready to handle allocation
3330         * failures more gracefully we should just bail out here.
3331         */
3332
3333        /* The OOM killer may not free memory on a specific node */
3334        if (gfp_mask & __GFP_THISNODE)
3335                goto out;
3336
3337        /* Exhausted what can be done so it's blamo time */
3338        if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3339                *did_some_progress = 1;
3340
3341                /*
3342                 * Help non-failing allocations by giving them access to memory
3343                 * reserves
3344                 */
3345                if (gfp_mask & __GFP_NOFAIL)
3346                        page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3347                                        ALLOC_NO_WATERMARKS, ac);
3348        }
3349out:
3350        mutex_unlock(&oom_lock);
3351        return page;
3352}
3353
3354/*
3355 * Maximum number of compaction retries with progress before the OOM
3356 * killer is considered the only way to move forward.
3357 */
3358#define MAX_COMPACT_RETRIES 16
3359
3360#ifdef CONFIG_COMPACTION
3361/* Try memory compaction for high-order allocations before reclaim */
3362static struct page *
3363__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3364                unsigned int alloc_flags, const struct alloc_context *ac,
3365                enum compact_priority prio, enum compact_result *compact_result)
3366{
3367        struct page *page;
3368        unsigned int noreclaim_flag;
3369
3370        if (!order)
3371                return NULL;
3372
3373        noreclaim_flag = memalloc_noreclaim_save();
3374        *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3375                                                                        prio);
3376        memalloc_noreclaim_restore(noreclaim_flag);
3377
3378        if (*compact_result <= COMPACT_INACTIVE)
3379                return NULL;
3380
3381        /*
3382         * In at least one zone compaction wasn't deferred or skipped, so let's
3383         * count a compaction stall
3384         */
3385        count_vm_event(COMPACTSTALL);
3386
3387        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3388
3389        if (page) {
3390                struct zone *zone = page_zone(page);
3391
3392                zone->compact_blockskip_flush = false;
3393                compaction_defer_reset(zone, order, true);
3394                count_vm_event(COMPACTSUCCESS);
3395                return page;
3396        }
3397
3398        /*
3399         * It's bad if a compaction run occurs and fails. The most likely reason
3400         * is that pages exist, but not enough to satisfy watermarks.
3401         */
3402        count_vm_event(COMPACTFAIL);
3403
3404        cond_resched();
3405
3406        return NULL;
3407}
3408
3409static inline bool
3410should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3411                     enum compact_result compact_result,
3412                     enum compact_priority *compact_priority,
3413                     int *compaction_retries)
3414{
3415        int max_retries = MAX_COMPACT_RETRIES;
3416        int min_priority;
3417        bool ret = false;
3418        int retries = *compaction_retries;
3419        enum compact_priority priority = *compact_priority;
3420
3421        if (!order)
3422                return false;
3423
3424        if (compaction_made_progress(compact_result))
3425                (*compaction_retries)++;
3426
3427        /*
3428         * compaction considers all the zones as desperately out of memory,
3429         * so it doesn't really make much sense to retry except when the
3430         * failure could be caused by insufficient priority
3431         */
3432        if (compaction_failed(compact_result))
3433                goto check_priority;
3434
3435        /*
3436         * make sure the compaction wasn't deferred or didn't bail out early
3437         * due to lock contention before we declare that we should give up.
3438         * But do not retry if the given zonelist is not suitable for
3439         * compaction.
3440         */
3441        if (compaction_withdrawn(compact_result)) {
3442                ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3443                goto out;
3444        }
3445
3446        /*
3447         * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3448         * costly ones because they are de facto nofail and invoke the OOM
3449         * killer to move on, while costly requests can fail and their users
3450         * are ready to cope with that. 1/4 of the retries is rather
3451         * arbitrary, but we would need much more detailed feedback from
3452         * compaction to make a better decision.
3453         */
3454        if (order > PAGE_ALLOC_COSTLY_ORDER)
3455                max_retries /= 4;
3456        if (*compaction_retries <= max_retries) {
3457                ret = true;
3458                goto out;
3459        }
3460
3461        /*
3462         * Make sure there are attempts at the highest priority if we exhausted
3463         * all retries or failed at the lower priorities.
3464         */
3465check_priority:
3466        min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3467                        MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3468
3469        if (*compact_priority > min_priority) {
3470                (*compact_priority)--;
3471                *compaction_retries = 0;
3472                ret = true;
3473        }
3474out:
3475        trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3476        return ret;
3477}
3478#else
3479static inline struct page *
3480__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3481                unsigned int alloc_flags, const struct alloc_context *ac,
3482                enum compact_priority prio, enum compact_result *compact_result)
3483{
3484        *compact_result = COMPACT_SKIPPED;
3485        return NULL;
3486}
3487
3488static inline bool
3489should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3490                     enum compact_result compact_result,
3491                     enum compact_priority *compact_priority,
3492                     int *compaction_retries)
3493{
3494        struct zone *zone;
3495        struct zoneref *z;
3496
3497        if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3498                return false;
3499
3500        /*
3501         * There are setups with compaction disabled which would prefer to loop
3502         * inside the allocator rather than hit the oom killer prematurely.
3503         * Let's give them good hope and keep retrying while the order-0
3504         * watermarks are OK.
3505         */
3506        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3507                                        ac->nodemask) {
3508                if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3509                                        ac_classzone_idx(ac), alloc_flags))
3510                        return true;
3511        }
3512        return false;
3513}
3514#endif /* CONFIG_COMPACTION */
3515
3516/* Perform direct synchronous page reclaim */
3517static int
3518__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3519                                        const struct alloc_context *ac)
3520{
3521        struct reclaim_state reclaim_state;
3522        int progress;
3523        unsigned int noreclaim_flag;
3524
3525        cond_resched();
3526
3527        /* We now go into synchronous reclaim */
3528        cpuset_memory_pressure_bump();
3529        noreclaim_flag = memalloc_noreclaim_save();
3530        lockdep_set_current_reclaim_state(gfp_mask);
3531        reclaim_state.reclaimed_slab = 0;
3532        current->reclaim_state = &reclaim_state;
3533
3534        progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3535                                                                ac->nodemask);
3536
3537        current->reclaim_state = NULL;
3538        lockdep_clear_current_reclaim_state();
3539        memalloc_noreclaim_restore(noreclaim_flag);
3540
3541        cond_resched();
3542
3543        return progress;
3544}
3545
3546/* The really slow allocator path where we enter direct reclaim */
3547static inline struct page *
3548__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3549                unsigned int alloc_flags, const struct alloc_context *ac,
3550                unsigned long *did_some_progress)
3551{
3552        struct page *page = NULL;
3553        bool drained = false;
3554
3555        *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3556        if (unlikely(!(*did_some_progress)))
3557                return NULL;
3558
3559retry:
3560        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3561
3562        /*
3563         * If an allocation failed after direct reclaim, it could be because
3564         * pages are pinned on the per-cpu lists or in high alloc reserves.
3565         * Shrink them and try again
3566         */
3567        if (!page && !drained) {
3568                unreserve_highatomic_pageblock(ac, false);
3569                drain_all_pages(NULL);
3570                drained = true;
3571                goto retry;
3572        }
3573
3574        return page;
3575}
3576
3577static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
3578{
3579        struct zoneref *z;
3580        struct zone *zone;
3581        pg_data_t *last_pgdat = NULL;
3582
3583        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3584                                        ac->high_zoneidx, ac->nodemask) {
3585                if (last_pgdat != zone->zone_pgdat)
3586                        wakeup_kswapd(zone, order, ac->high_zoneidx);
3587                last_pgdat = zone->zone_pgdat;
3588        }
3589}
3590
3591static inline unsigned int
3592gfp_to_alloc_flags(gfp_t gfp_mask)
3593{
3594        unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3595
3596        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
3597        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
3598
3599        /*
3600         * The caller may dip into page reserves a bit more if the caller
3601         * cannot run direct reclaim, or if the caller has realtime scheduling
3602         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
3603         * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
3604         */
3605        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
3606
3607        if (gfp_mask & __GFP_ATOMIC) {
3608                /*
3609                 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3610                 * if it can't schedule.
3611                 */
3612                if (!(gfp_mask & __GFP_NOMEMALLOC))
3613                        alloc_flags |= ALLOC_HARDER;
3614                /*
3615                 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
3616                 * comment for __cpuset_node_allowed().
3617                 */
3618                alloc_flags &= ~ALLOC_CPUSET;
3619        } else if (unlikely(rt_task(current)) && !in_interrupt())
3620                alloc_flags |= ALLOC_HARDER;
3621
3622#ifdef CONFIG_CMA
3623        if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3624                alloc_flags |= ALLOC_CMA;
3625#endif
3626        return alloc_flags;
3627}
3628
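/*
 * Illustrative sketch (hypothetical, not in the tree): for a plain
 * GFP_ATOMIC request the mapping above yields ALLOC_WMARK_MIN | ALLOC_HIGH |
 * ALLOC_HARDER with ALLOC_CPUSET cleared, i.e. atomic callers may dip
 * further into the reserves and ignore cpuset restrictions.
 */
static inline unsigned int example_atomic_alloc_flags(void)
{
        return gfp_to_alloc_flags(GFP_ATOMIC);
}
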
3629bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3630{
3631        if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3632                return false;
3633
3634        if (gfp_mask & __GFP_MEMALLOC)
3635                return true;
3636        if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3637                return true;
3638        if (!in_interrupt() &&
3639                        ((current->flags & PF_MEMALLOC) ||
3640                         unlikely(test_thread_flag(TIF_MEMDIE))))
3641                return true;
3642
3643        return false;
3644}
3645
3646/*
3647 * Checks whether it makes sense to retry the reclaim to make forward progress
3648 * for the given allocation request.
3649 *
3650 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3651 * without success, or when we couldn't even meet the watermark if we
3652 * reclaimed all remaining pages on the LRU lists.
3653 *
3654 * Returns true if a retry is viable or false to enter the oom path.
3655 */
3656static inline bool
3657should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3658                     struct alloc_context *ac, int alloc_flags,
3659                     bool did_some_progress, int *no_progress_loops)
3660{
3661        struct zone *zone;
3662        struct zoneref *z;
3663
3664        /*
3665         * Costly allocations might have made progress but this doesn't mean
3666         * their order will become available due to high fragmentation so
3667         * always increment the no progress counter for them
3668         */
3669        if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3670                *no_progress_loops = 0;
3671        else
3672                (*no_progress_loops)++;
3673
3674        /*
3675         * Make sure we converge to OOM if we cannot make any progress
3676         * several times in a row.
3677         */
3678        if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
3679                /* Before OOM, exhaust highatomic_reserve */
3680                return unreserve_highatomic_pageblock(ac, true);
3681        }
3682
3683        /*
3684         * Keep reclaiming pages while there is a chance this will lead
3685         * somewhere.  If none of the target zones can satisfy our allocation
3686         * request even if all reclaimable pages are considered then we are
3687         * screwed and have to go OOM.
3688         */
3689        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3690                                        ac->nodemask) {
3691                unsigned long available;
3692                unsigned long reclaimable;
3693                unsigned long min_wmark = min_wmark_pages(zone);
3694                bool wmark;
3695
3696                available = reclaimable = zone_reclaimable_pages(zone);
3697                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3698
3699                /*
3700                 * Would the allocation succeed if we reclaimed all
3701                 * reclaimable pages?
3702                 */
3703                wmark = __zone_watermark_ok(zone, order, min_wmark,
3704                                ac_classzone_idx(ac), alloc_flags, available);
3705                trace_reclaim_retry_zone(z, order, reclaimable,
3706                                available, min_wmark, *no_progress_loops, wmark);
3707                if (wmark) {
3708                        /*
3709                         * If we didn't make any progress and have a lot of
3710                         * dirty + writeback pages then we should wait for
3711                         * an IO to complete to slow down the reclaim and
3712                         * prevent premature OOM
3713                         */
3714                        if (!did_some_progress) {
3715                                unsigned long write_pending;
3716
3717                                write_pending = zone_page_state_snapshot(zone,
3718                                                        NR_ZONE_WRITE_PENDING);
3719
3720                                if (2 * write_pending > reclaimable) {
3721                                        congestion_wait(BLK_RW_ASYNC, HZ/10);
3722                                        return true;
3723                                }
3724                        }
3725
3726                        /*
3727                         * Memory allocation/reclaim might be called from a WQ
3728                         * context and the current implementation of the WQ
3729                         * concurrency control doesn't recognize that
3730                         * a particular WQ is congested if the worker thread is
3731                         * looping without ever sleeping. Therefore we have to
3732                         * do a short sleep here rather than calling
3733                         * cond_resched().
3734                         */
3735                        if (current->flags & PF_WQ_WORKER)
3736                                schedule_timeout_uninterruptible(1);
3737                        else
3738                                cond_resched();
3739
3740                        return true;
3741                }
3742        }
3743
3744        return false;
3745}
3746
3747static inline bool
3748check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3749{
3750        /*
3751         * It's possible that cpuset's mems_allowed and the nodemask from
3752         * mempolicy don't intersect. This should normally be dealt with by
3753         * policy_nodemask(), but it's possible to race with a cpuset update in
3754         * such a way that the check therein was true, and then it became false
3755         * before we got our cpuset_mems_cookie here.
3756         * This assumes that for all allocations, ac->nodemask can come only
3757         * from MPOL_BIND mempolicy (whose documented semantics are to be ignored
3758         * when it does not intersect with the cpuset restrictions) or the
3759         * caller can deal with a violated nodemask.
3760         */
3761        if (cpusets_enabled() && ac->nodemask &&
3762                        !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3763                ac->nodemask = NULL;
3764                return true;
3765        }
3766
3767        /*
3768         * When updating a task's mems_allowed or mempolicy nodemask, it is
3769         * possible to race with parallel threads in such a way that our
3770         * allocation can fail while the mask is being updated. If we are about
3771         * to fail, check if the cpuset changed during allocation and if so,
3772         * retry.
3773         */
3774        if (read_mems_allowed_retry(cpuset_mems_cookie))
3775                return true;
3776
3777        return false;
3778}
3779
3780static inline struct page *
3781__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3782                                                struct alloc_context *ac)
3783{
3784        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3785        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3786        struct page *page = NULL;
3787        unsigned int alloc_flags;
3788        unsigned long did_some_progress;
3789        enum compact_priority compact_priority;
3790        enum compact_result compact_result;
3791        int compaction_retries;
3792        int no_progress_loops;
3793        unsigned long alloc_start = jiffies;
3794        unsigned int stall_timeout = 10 * HZ;
3795        unsigned int cpuset_mems_cookie;
3796
3797        /*
3798         * In the slowpath, we sanity check order to avoid ever trying to
3799         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
3800         * be using allocators in order of preference for an area that is
3801         * too large.
3802         */
3803        if (order >= MAX_ORDER) {
3804                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
3805                return NULL;
3806        }
3807
3808        /*
3809         * We also sanity check to catch abuse of atomic reserves being used by
3810         * callers that are not in atomic context.
3811         */
3812        if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
3813                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3814                gfp_mask &= ~__GFP_ATOMIC;
3815
3816retry_cpuset:
3817        compaction_retries = 0;
3818        no_progress_loops = 0;
3819        compact_priority = DEF_COMPACT_PRIORITY;
3820        cpuset_mems_cookie = read_mems_allowed_begin();
3821
3822        /*
3823         * The fast path uses conservative alloc_flags to succeed only until
3824         * kswapd needs to be woken up, and to avoid the cost of setting up
3825         * alloc_flags precisely. So we do that now.
3826         */
3827        alloc_flags = gfp_to_alloc_flags(gfp_mask);
3828
3829        /*
3830         * We need to recalculate the starting point for the zonelist iterator
3831         * because we might have used different nodemask in the fast path, or
3832         * there was a cpuset modification and we are retrying - otherwise we
3833         * could end up iterating over non-eligible zones endlessly.
3834         */
3835        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3836                                        ac->high_zoneidx, ac->nodemask);
3837        if (!ac->preferred_zoneref->zone)
3838                goto nopage;
3839
3840        if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3841                wake_all_kswapds(order, ac);
3842
3843        /*
3844         * The adjusted alloc_flags might result in immediate success, so try
3845         * that first
3846         */
3847        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3848        if (page)
3849                goto got_pg;
3850
3851        /*
3852         * For costly allocations, try direct compaction first, as it's likely
3853         * that we have enough base pages and don't need to reclaim. For non-
3854         * movable high-order allocations, do that as well, as compaction will
3855         * try to prevent permanent fragmentation by migrating from blocks of the
3856         * same migratetype.
3857         * Don't try this for allocations that are allowed to ignore
3858         * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3859         */
3860        if (can_direct_reclaim &&
3861                        (costly_order ||
3862                           (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3863                        && !gfp_pfmemalloc_allowed(gfp_mask)) {
3864                page = __alloc_pages_direct_compact(gfp_mask, order,
3865                                                alloc_flags, ac,
3866                                                INIT_COMPACT_PRIORITY,
3867                                                &compact_result);
3868                if (page)
3869                        goto got_pg;
3870
3871                /*
3872                 * Checks for costly allocations with __GFP_NORETRY, which
3873                 * includes THP page fault allocations
3874                 */
3875                if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3876                        /*
3877                         * If compaction is deferred for high-order allocations,
3878                         * it is because sync compaction recently failed. If
3879                         * this is the case and the caller requested a THP
3880                         * allocation, we do not want to heavily disrupt the
3881                         * system, so we fail the allocation instead of entering
3882                         * direct reclaim.
3883                         */
3884                        if (compact_result == COMPACT_DEFERRED)
3885                                goto nopage;
3886
3887                        /*
3888                         * Looks like reclaim/compaction is worth trying, but
3889                         * sync compaction could be very expensive, so keep
3890                         * using async compaction.
3891                         */
3892                        compact_priority = INIT_COMPACT_PRIORITY;
3893                }
3894        }
3895
3896retry:
3897        /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
3898        if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3899                wake_all_kswapds(order, ac);
3900
3901        if (gfp_pfmemalloc_allowed(gfp_mask))
3902                alloc_flags = ALLOC_NO_WATERMARKS;
3903
3904        /*
3905         * Reset the zonelist iterators if memory policies can be ignored.
3906         * These allocations are high priority and system rather than user
3907         * oriented.
3908         */
3909        if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
3910                ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
3911                ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3912                                        ac->high_zoneidx, ac->nodemask);
3913        }
3914
3915        /* Attempt with potentially adjusted zonelist and alloc_flags */
3916        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3917        if (page)
3918                goto got_pg;
3919
3920        /* Caller is not willing to reclaim, we can't balance anything */
3921        if (!can_direct_reclaim)
3922                goto nopage;
3923
3924        /* Make sure we know about allocations which stall for too long */
3925        if (time_after(jiffies, alloc_start + stall_timeout)) {
3926                warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
3927                        "page allocation stalls for %ums, order:%u",
3928                        jiffies_to_msecs(jiffies-alloc_start), order);
3929                stall_timeout += 10 * HZ;
3930        }
3931
3932        /* Avoid recursion of direct reclaim */
3933        if (current->flags & PF_MEMALLOC)
3934                goto nopage;
3935
3936        /* Try direct reclaim and then allocating */
3937        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
3938                                                        &did_some_progress);
3939        if (page)
3940                goto got_pg;
3941
3942        /* Try direct compaction and then allocating */
3943        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3944                                        compact_priority, &compact_result);
3945        if (page)
3946                goto got_pg;
3947
3948        /* Do not loop if specifically requested */
3949        if (gfp_mask & __GFP_NORETRY)
3950                goto nopage;
3951
3952        /*
3953         * Do not retry costly high order allocations unless they are
3954         * __GFP_RETRY_MAYFAIL
3955         */
3956        if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
3957                goto nopage;
3958
3959        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
3960                                 did_some_progress > 0, &no_progress_loops))
3961                goto retry;
3962
3963        /*
3964         * It doesn't make any sense to retry compaction if order-0 reclaim
3965         * is not able to make any progress, because the current
3966         * implementation of compaction depends on a sufficient amount
3967         * of free memory (see __compaction_suitable)
3968         */
3969        if (did_some_progress > 0 &&
3970                        should_compact_retry(ac, order, alloc_flags,
3971                                compact_result, &compact_priority,
3972                                &compaction_retries))
3973                goto retry;
3974
3975
3976        /* Deal with possible cpuset update races before we start OOM killing */
3977        if (check_retry_cpuset(cpuset_mems_cookie, ac))
3978                goto retry_cpuset;
3979
3980        /* Reclaim has failed us, start killing things */
3981        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3982        if (page)
3983                goto got_pg;
3984
3985        /* Prevent allocations with no watermarks from looping endlessly */
3986        if (test_thread_flag(TIF_MEMDIE) &&
3987            (alloc_flags == ALLOC_NO_WATERMARKS ||
3988             (gfp_mask & __GFP_NOMEMALLOC)))
3989                goto nopage;
3990
3991        /* Retry as long as the OOM killer is making progress */
3992        if (did_some_progress) {
3993                no_progress_loops = 0;
3994                goto retry;
3995        }
3996
3997nopage:
3998        /* Deal with possible cpuset update races before we fail */
3999        if (check_retry_cpuset(cpuset_mems_cookie, ac))
4000                goto retry_cpuset;
4001
4002        /*
4003         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4004         * we always retry
4005         */
4006        if (gfp_mask & __GFP_NOFAIL) {
4007                /*
4008                 * All existing users of __GFP_NOFAIL are blockable, so warn
4009                 * of any new users that actually require GFP_NOWAIT
4010                 */
4011                if (WARN_ON_ONCE(!can_direct_reclaim))
4012                        goto fail;
4013
4014                /*
4015                 * A PF_MEMALLOC request from this context is rather bizarre
4016                 * because we cannot reclaim anything and can only loop waiting
4017                 * for somebody to do the work for us
4018                 */
4019                WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4020
4021                /*
4022                 * Non-failing costly orders are a hard requirement which we
4023                 * are not well prepared for, so let's warn about these users
4024                 * so that we can identify them and convert them to something
4025                 * else.
4026                 */
4027                WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4028
4029                /*
4030                 * Help non-failing allocations by giving them access to memory
4031                 * reserves but do not use ALLOC_NO_WATERMARKS because this
4032                 * could deplete whole memory reserves which would just make
4033                 * the situation worse
4034                 */
4035                page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4036                if (page)
4037                        goto got_pg;
4038
4039                cond_resched();
4040                goto retry;
4041        }
4042fail:
4043        warn_alloc(gfp_mask, ac->nodemask,
4044                        "page allocation failure: order:%u", order);
4045got_pg:
4046        return page;
4047}
4048
4049static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4050                int preferred_nid, nodemask_t *nodemask,
4051                struct alloc_context *ac, gfp_t *alloc_mask,
4052                unsigned int *alloc_flags)
4053{
4054        ac->high_zoneidx = gfp_zone(gfp_mask);
4055        ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4056        ac->nodemask = nodemask;
4057        ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4058
4059        if (cpusets_enabled()) {
4060                *alloc_mask |= __GFP_HARDWALL;
4061                if (!ac->nodemask)
4062                        ac->nodemask = &cpuset_current_mems_allowed;
4063                else
4064                        *alloc_flags |= ALLOC_CPUSET;
4065        }
4066
4067        lockdep_trace_alloc(gfp_mask);
4068
4069        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4070
4071        if (should_fail_alloc_page(gfp_mask, order))
4072                return false;
4073
4074        if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4075                *alloc_flags |= ALLOC_CMA;
4076
4077        return true;
4078}
4079
4080/* Determine whether to spread dirty pages and what the first usable zone is */
4081static inline void finalise_ac(gfp_t gfp_mask,
4082                unsigned int order, struct alloc_context *ac)
4083{
4084        /* Dirty zone balancing only done in the fast path */
4085        ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4086
4087        /*
4088         * The preferred zone is used for statistics but crucially it is
4089         * also used as the starting point for the zonelist iterator. It
4090         * may get reset for allocations that ignore memory policies.
4091         */
4092        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4093                                        ac->high_zoneidx, ac->nodemask);
4094}
4095
4096/*
4097 * This is the 'heart' of the zoned buddy allocator.
4098 */
4099struct page *
4100__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4101                                                        nodemask_t *nodemask)
4102{
4103        struct page *page;
4104        unsigned int alloc_flags = ALLOC_WMARK_LOW;
4105        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
4106        struct alloc_context ac = { };
4107
4108        gfp_mask &= gfp_allowed_mask;
4109        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4110                return NULL;
4111
4112        finalise_ac(gfp_mask, order, &ac);
4113
4114        /* First allocation attempt */
4115        page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4116        if (likely(page))
4117                goto out;
4118
4119        /*
4120         * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4121         * resp. GFP_NOIO which has to be inherited for all allocation requests
4122         * from a particular context which has been marked by
4123         * memalloc_no{fs,io}_{save,restore}.
4124         */
4125        alloc_mask = current_gfp_context(gfp_mask);
4126        ac.spread_dirty_pages = false;
4127
4128        /*
4129         * Restore the original nodemask if it was potentially replaced with
4130         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4131         */
4132        if (unlikely(ac.nodemask != nodemask))
4133                ac.nodemask = nodemask;
4134
4135        page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4136
4137out:
4138        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4139            unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4140                __free_pages(page, order);
4141                page = NULL;
4142        }
4143
4144        if (kmemcheck_enabled && page)
4145                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
4146
4147        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4148
4149        return page;
4150}
4151EXPORT_SYMBOL(__alloc_pages_nodemask);
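
/*
 * Illustrative sketch (hypothetical caller): wrappers such as alloc_pages()
 * ultimately reach __alloc_pages_nodemask(); a direct call for two
 * physically contiguous pages on the local node might look like this.
 */
static struct page *example_alloc_two_pages(void)
{
        /* Order-1, may sleep and perform reclaim (GFP_KERNEL). */
        return __alloc_pages_nodemask(GFP_KERNEL, 1, numa_node_id(), NULL);
}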
4152
4153/*
4154 * Common helper functions.
4155 */
4156unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4157{
4158        struct page *page;
4159
4160        /*
4161         * __get_free_pages() returns a directly mapped virtual address, which
4162         * cannot represent a highmem page
4163         */
4164        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
4165
4166        page = alloc_pages(gfp_mask, order);
4167        if (!page)
4168                return 0;
4169        return (unsigned long) page_address(page);
4170}
4171EXPORT_SYMBOL(__get_free_pages);
4172
4173unsigned long get_zeroed_page(gfp_t gfp_mask)
4174{
4175        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4176}
4177EXPORT_SYMBOL(get_zeroed_page);
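
/*
 * Illustrative sketch (hypothetical caller): a single zeroed page obtained
 * above is returned with free_page() once it is no longer needed.
 */
static int example_use_zeroed_page(void)
{
        unsigned long addr = get_zeroed_page(GFP_KERNEL);

        if (!addr)
                return -ENOMEM;
        /* ... use the page at (void *)addr ... */
        free_page(addr);
        return 0;
}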
4178
4179void __free_pages(struct page *page, unsigned int order)
4180{
4181        if (put_page_testzero(page)) {
4182                if (order == 0)
4183                        free_hot_cold_page(page, false);
4184                else
4185                        __free_pages_ok(page, order);
4186        }
4187}
4188
4189EXPORT_SYMBOL(__free_pages);
4190
4191void free_pages(unsigned long addr, unsigned int order)
4192{
4193        if (addr != 0) {
4194                VM_BUG_ON(!virt_addr_valid((void *)addr));
4195                __free_pages(virt_to_page((void *)addr), order);
4196        }
4197}
4198
4199EXPORT_SYMBOL(free_pages);
4200
4201/*
4202 * Page Fragment:
4203 *  An arbitrary-length arbitrary-offset area of memory which resides
4204 *  within a 0 or higher order page.  Multiple fragments within that page
4205 *  are individually refcounted, in the page's reference counter.
4206 *
4207 * The page_frag functions below provide a simple allocation framework for
4208 * page fragments.  This is used by the network stack and network device
4209 * drivers to provide a backing region of memory for use as either an
4210 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4211 */
4212static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4213                                             gfp_t gfp_mask)
4214{
4215        struct page *page = NULL;
4216        gfp_t gfp = gfp_mask;
4217
4218#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4219        gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4220                    __GFP_NOMEMALLOC;
4221        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4222                                PAGE_FRAG_CACHE_MAX_ORDER);
4223        nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4224#endif
4225        if (unlikely(!page))
4226                page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4227
4228        nc->va = page ? page_address(page) : NULL;
4229
4230        return page;
4231}
4232
4233void __page_frag_cache_drain(struct page *page, unsigned int count)
4234{
4235        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4236
4237        if (page_ref_sub_and_test(page, count)) {
4238                unsigned int order = compound_order(page);
4239
4240                if (order == 0)
4241                        free_hot_cold_page(page, false);
4242                else
4243                        __free_pages_ok(page, order);
4244        }
4245}
4246EXPORT_SYMBOL(__page_frag_cache_drain);
4247
4248void *page_frag_alloc(struct page_frag_cache *nc,
4249                      unsigned int fragsz, gfp_t gfp_mask)
4250{
4251        unsigned int size = PAGE_SIZE;
4252        struct page *page;
4253        int offset;
4254
4255        if (unlikely(!nc->va)) {
4256refill:
4257                page = __page_frag_cache_refill(nc, gfp_mask);
4258                if (!page)
4259                        return NULL;
4260
4261#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4262                /* if size can vary use size else just use PAGE_SIZE */
4263                size = nc->size;
4264#endif
4265                /* Even if we own the page, we do not use atomic_set().
4266                 * This would break get_page_unless_zero() users.
4267                 */
4268                page_ref_add(page, size - 1);
4269
4270                /* reset page count bias and offset to start of new frag */
4271                nc->pfmemalloc = page_is_pfmemalloc(page);
4272                nc->pagecnt_bias = size;
4273                nc->offset = size;
4274        }
4275
4276        offset = nc->offset - fragsz;
4277        if (unlikely(offset < 0)) {
4278                page = virt_to_page(nc->va);
4279
4280                if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4281                        goto refill;
4282
4283#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4284                /* if size can vary use size else just use PAGE_SIZE */
4285                size = nc->size;
4286#endif
4287                /* OK, page count is 0, we can safely set it */
4288                set_page_count(page, size);
4289
4290                /* reset page count bias and offset to start of new frag */
4291                nc->pagecnt_bias = size;
4292                offset = size - fragsz;
4293        }
4294
4295        nc->pagecnt_bias--;
4296        nc->offset = offset;
4297
4298        return nc->va + offset;
4299}
4300EXPORT_SYMBOL(page_frag_alloc);
4301
4302/*
4303 * Frees a page fragment allocated out of either a compound or order 0 page.
4304 */
4305void page_frag_free(void *addr)
4306{
4307        struct page *page = virt_to_head_page(addr);
4308
4309        if (unlikely(put_page_testzero(page)))
4310                __free_pages_ok(page, compound_order(page));
4311}
4312EXPORT_SYMBOL(page_frag_free);
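
/*
 * Illustrative sketch (hypothetical driver-style usage): small buffers are
 * carved out of a page_frag_cache and released individually. Real users
 * (e.g. the networking stack) typically keep the cache per CPU or per
 * device; a plain static is used here only to keep the sketch short.
 */
static struct page_frag_cache example_frag_cache;

static void *example_frag_get(unsigned int len)
{
        /* Refilling of the backing page happens inside page_frag_alloc(). */
        return page_frag_alloc(&example_frag_cache, len, GFP_ATOMIC);
}

static void example_frag_put(void *buf)
{
        page_frag_free(buf);
}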
4313
4314static void *make_alloc_exact(unsigned long addr, unsigned int order,
4315                size_t size)
4316{
4317        if (addr) {
4318                unsigned long alloc_end = addr + (PAGE_SIZE << order);
4319                unsigned long used = addr + PAGE_ALIGN(size);
4320
4321                split_page(virt_to_page((void *)addr), order);
4322                while (used < alloc_end) {
4323                        free_page(used);
4324                        used += PAGE_SIZE;
4325                }
4326        }
4327        return (void *)addr;
4328}
4329
4330/**
4331 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4332 * @size: the number of bytes to allocate
4333 * @gfp_mask: GFP flags for the allocation
4334 *
4335 * This function is similar to alloc_pages(), except that it allocates the
4336 * minimum number of pages to satisfy the request.  alloc_pages() can only
4337 * allocate memory in power-of-two pages.
4338 *
4339 * This function is also limited by MAX_ORDER.
4340 *
4341 * Memory allocated by this function must be released by free_pages_exact().
4342 */
4343void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4344{
4345        unsigned int order = get_order(size);
4346        unsigned long addr;
4347
4348        addr = __get_free_pages(gfp_mask, order);
4349        return make_alloc_exact(addr, order, size);
4350}
4351EXPORT_SYMBOL(alloc_pages_exact);
4352
4353/**
4354 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4355 *                         pages on a node.
4356 * @nid: the preferred node ID where memory should be allocated
4357 * @size: the number of bytes to allocate
4358 * @gfp_mask: GFP flags for the allocation
4359 *
4360 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4361 * back.
4362 */
4363void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4364{
4365        unsigned int order = get_order(size);
4366        struct page *p = alloc_pages_node(nid, gfp_mask, order);
4367        if (!p)
4368                return NULL;
4369        return make_alloc_exact((unsigned long)page_address(p), order, size);
4370}
4371
4372/**
4373 * free_pages_exact - release memory allocated via alloc_pages_exact()
4374 * @virt: the value returned by alloc_pages_exact.
4375 * @size: size of allocation, same value as passed to alloc_pages_exact().
4376 *
4377 * Release the memory allocated by a previous call to alloc_pages_exact.
4378 */
4379void free_pages_exact(void *virt, size_t size)
4380{
4381        unsigned long addr = (unsigned long)virt;
4382        unsigned long end = addr + PAGE_ALIGN(size);
4383
4384        while (addr < end) {
4385                free_page(addr);
4386                addr += PAGE_SIZE;
4387        }
4388}
4389EXPORT_SYMBOL(free_pages_exact);
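
/*
 * Illustrative usage sketch (not part of this file, 4 KiB pages assumed):
 * a request for 10000 bytes needs an order-2 (16 KiB) block from
 * __get_free_pages(); make_alloc_exact() splits it and frees the trailing
 * page, so exactly three pages remain behind the returned pointer:
 *
 *	buf = alloc_pages_exact(10000, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 10000);
 */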
4390
4391/**
4392 * nr_free_zone_pages - count number of pages beyond high watermark
4393 * @offset: The zone index of the highest zone
4394 *
4395 * nr_free_zone_pages() counts the number of pages which are beyond the
4396 * high watermark within all zones at or below a given zone index.  For each
4397 * zone, the number of pages is calculated as:
4398 *
4399 *     nr_free_zone_pages = managed_pages - high_pages
4400 */
4401static unsigned long nr_free_zone_pages(int offset)
4402{
4403        struct zoneref *z;
4404        struct zone *zone;
4405
4406        /* Just pick one node, since fallback list is circular */
4407        unsigned long sum = 0;
4408
4409        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4410
4411        for_each_zone_zonelist(zone, z, zonelist, offset) {
4412                unsigned long size = zone->managed_pages;
4413                unsigned long high = high_wmark_pages(zone);
4414                if (size > high)
4415                        sum += size - high;
4416        }
4417
4418        return sum;
4419}
4420
4421/**
4422 * nr_free_buffer_pages - count number of pages beyond high watermark
4423 *
4424 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4425 * watermark within ZONE_DMA and ZONE_NORMAL.
4426 */
4427unsigned long nr_free_buffer_pages(void)
4428{
4429        return nr_free_zone_pages(gfp_zone(GFP_USER));
4430}
4431EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4432
4433/**
4434 * nr_free_pagecache_pages - count number of pages beyond high watermark
4435 *
4436 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4437 * high watermark within all zones.
4438 */
4439unsigned long nr_free_pagecache_pages(void)
4440{
4441        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4442}
4443
4444static inline void show_node(struct zone *zone)
4445{
4446        if (IS_ENABLED(CONFIG_NUMA))
4447                printk("Node %d ", zone_to_nid(zone));
4448}
4449
4450long si_mem_available(void)
4451{
4452        long available;
4453        unsigned long pagecache;
4454        unsigned long wmark_low = 0;
4455        unsigned long pages[NR_LRU_LISTS];
4456        struct zone *zone;
4457        int lru;
4458
4459        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4460                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4461
4462        for_each_zone(zone)
4463                wmark_low += zone->watermark[WMARK_LOW];
4464
4465        /*
4466         * Estimate the amount of memory available for userspace allocations,
4467         * without causing swapping.
4468         */
4469        available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
4470
4471        /*
4472         * Not all the page cache can be freed, otherwise the system will
4473         * start swapping. Assume at least half of the page cache, or the
4474         * low watermark worth of cache, needs to stay.
4475         */
4476        pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4477        pagecache -= min(pagecache / 2, wmark_low);
4478        available += pagecache;
4479
4480        /*
4481         * Part of the reclaimable slab consists of items that are in use,
4482         * and cannot be freed. Cap this estimate at the low watermark.
4483         */
4484        available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4485                     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4486                         wmark_low);
4487
4488        if (available < 0)
4489                available = 0;
4490        return available;
4491}
4492EXPORT_SYMBOL_GPL(si_mem_available);
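
/*
 * Worked example of the estimate above, with illustrative numbers only:
 * 1000 free pages, a totalreserve_pages of 200, 600 file LRU pages and a
 * summed low watermark of 100 pages give 1000 - 200 = 800, plus
 * 600 - min(300, 100) = 500 from the page cache, i.e. 1300 pages before
 * the reclaimable-slab term is added.
 */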
4493
4494void si_meminfo(struct sysinfo *val)
4495{
4496        val->totalram = totalram_pages;
4497        val->sharedram = global_node_page_state(NR_SHMEM);
4498        val->freeram = global_page_state(NR_FREE_PAGES);
4499        val->bufferram = nr_blockdev_pages();
4500        val->totalhigh = totalhigh_pages;
4501        val->freehigh = nr_free_highpages();
4502        val->mem_unit = PAGE_SIZE;
4503}
4504
4505EXPORT_SYMBOL(si_meminfo);
4506
4507#ifdef CONFIG_NUMA
4508void si_meminfo_node(struct sysinfo *val, int nid)
4509{
4510        int zone_type;          /* needs to be signed */
4511        unsigned long managed_pages = 0;
4512        unsigned long managed_highpages = 0;
4513        unsigned long free_highpages = 0;
4514        pg_data_t *pgdat = NODE_DATA(nid);
4515
4516        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4517                managed_pages += pgdat->node_zones[zone_type].managed_pages;
4518        val->totalram = managed_pages;
4519        val->sharedram = node_page_state(pgdat, NR_SHMEM);
4520        val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4521#ifdef CONFIG_HIGHMEM
4522        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4523                struct zone *zone = &pgdat->node_zones[zone_type];
4524
4525                if (is_highmem(zone)) {
4526                        managed_highpages += zone->managed_pages;
4527                        free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4528                }
4529        }
4530        val->totalhigh = managed_highpages;
4531        val->freehigh = free_highpages;
4532#else
4533        val->totalhigh = managed_highpages;
4534        val->freehigh = free_highpages;
4535#endif
4536        val->mem_unit = PAGE_SIZE;
4537}
4538#endif
4539
4540/*
4541 * Determine whether the node should be displayed or not, depending on whether
4542 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
4543 */
4544static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4545{
4546        if (!(flags & SHOW_MEM_FILTER_NODES))
4547                return false;
4548
4549        /*
4550         * no node mask - aka implicit memory numa policy. Do not bother with
4551         * the synchronization - read_mems_allowed_begin - because we do not
4552         * have to be precise here.
4553         */
4554        if (!nodemask)
4555                nodemask = &cpuset_current_mems_allowed;
4556
4557        return !node_isset(nid, *nodemask);
4558}
4559
4560#define K(x) ((x) << (PAGE_SHIFT-10))
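/* K() converts a page count to kilobytes: with 4 KiB pages (PAGE_SHIFT == 12) it is x << 2 */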
4561
4562static void show_migration_types(unsigned char type)
4563{
4564        static const char types[MIGRATE_TYPES] = {
4565                [MIGRATE_UNMOVABLE]     = 'U',
4566                [MIGRATE_MOVABLE]       = 'M',
4567                [MIGRATE_RECLAIMABLE]   = 'E',
4568                [MIGRATE_HIGHATOMIC]    = 'H',
4569#ifdef CONFIG_CMA
4570                [MIGRATE_CMA]           = 'C',
4571#endif
4572#ifdef CONFIG_MEMORY_ISOLATION
4573                [MIGRATE_ISOLATE]       = 'I',
4574#endif
4575        };
4576        char tmp[MIGRATE_TYPES + 1];
4577        char *p = tmp;
4578        int i;
4579
4580        for (i = 0; i < MIGRATE_TYPES; i++) {
4581                if (type & (1 << i))
4582                        *p++ = types[i];
4583        }
4584
4585        *p = '\0';
4586        printk(KERN_CONT "(%s) ", tmp);
4587}
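
/*
 * For example, a free_area where only the MIGRATE_UNMOVABLE and
 * MIGRATE_MOVABLE lists are non-empty is passed in as type 0x3 and is
 * printed as "(UM) " in the per-order listing produced below.
 */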
4588
4589/*
4590 * Show free area list (used inside shift_scroll-lock stuff)
4591 * We also calculate the percentage fragmentation. We do this by counting the
4592 * memory on each free list with the exception of the first item on the list.
4593 *
4594 * Bits in @filter:
4595 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
4596 *   cpuset.
4597 */
4598void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4599{
4600        unsigned long free_pcp = 0;
4601        int cpu;
4602        struct zone *zone;
4603        pg_data_t *pgdat;
4604
4605        for_each_populated_zone(zone) {
4606                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4607                        continue;
4608
4609                for_each_online_cpu(cpu)
4610                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4611        }
4612
4613        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
4614                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
4615                " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
4616                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
4617                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
4618                " free:%lu free_pcp:%lu free_cma:%lu\n",
4619                global_node_page_state(NR_ACTIVE_ANON),
4620                global_node_page_state(NR_INACTIVE_ANON),
4621                global_node_page_state(NR_ISOLATED_ANON),
4622                global_node_page_state(NR_ACTIVE_FILE),
4623                global_node_page_state(NR_INACTIVE_FILE),
4624                global_node_page_state(NR_ISOLATED_FILE),
4625                global_node_page_state(NR_UNEVICTABLE),
4626                global_node_page_state(NR_FILE_DIRTY),
4627                global_node_page_state(NR_WRITEBACK),
4628                global_node_page_state(NR_UNSTABLE_NFS),
4629                global_node_page_state(NR_SLAB_RECLAIMABLE),
4630                global_node_page_state(NR_SLAB_UNRECLAIMABLE),
4631                global_node_page_state(NR_FILE_MAPPED),
4632                global_node_page_state(NR_SHMEM),
4633                global_page_state(NR_PAGETABLE),
4634                global_page_state(NR_BOUNCE),
4635                global_page_state(NR_FREE_PAGES),
4636                free_pcp,
4637                global_page_state(NR_FREE_CMA_PAGES));
4638
4639        for_each_online_pgdat(pgdat) {
4640                if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
4641                        continue;
4642
4643                printk("Node %d"
4644                        " active_anon:%lukB"
4645                        " inactive_anon:%lukB"
4646                        " active_file:%lukB"
4647                        " inactive_file:%lukB"
4648                        " unevictable:%lukB"
4649                        " isolated(anon):%lukB"
4650                        " isolated(file):%lukB"
4651                        " mapped:%lukB"
4652                        " dirty:%lukB"
4653                        " writeback:%lukB"
4654                        " shmem:%lukB"
4655#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4656                        " shmem_thp: %lukB"
4657                        " shmem_pmdmapped: %lukB"
4658                        " anon_thp: %lukB"
4659#endif
4660                        " writeback_tmp:%lukB"
4661                        " unstable:%lukB"
4662                        " all_unreclaimable? %s"
4663                        "\n",
4664                        pgdat->node_id,
4665                        K(node_page_state(pgdat, NR_ACTIVE_ANON)),
4666                        K(node_page_state(pgdat, NR_INACTIVE_ANON)),
4667                        K(node_page_state(pgdat, NR_ACTIVE_FILE)),
4668                        K(node_page_state(pgdat, NR_INACTIVE_FILE)),
4669                        K(node_page_state(pgdat, NR_UNEVICTABLE)),
4670                        K(node_page_state(pgdat, NR_ISOLATED_ANON)),
4671                        K(node_page_state(pgdat, NR_ISOLATED_FILE)),
4672                        K(node_page_state(pgdat, NR_FILE_MAPPED)),
4673                        K(node_page_state(pgdat, NR_FILE_DIRTY)),
4674                        K(node_page_state(pgdat, NR_WRITEBACK)),
4675                        K(node_page_state(pgdat, NR_SHMEM)),
4676#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4677                        K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
4678                        K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
4679                                        * HPAGE_PMD_NR),
4680                        K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
4681#endif
4682                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
4683                        K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
4684                        pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
4685                                "yes" : "no");
4686        }
4687
4688        for_each_populated_zone(zone) {
4689                int i;
4690
4691                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4692                        continue;
4693
4694                free_pcp = 0;
4695                for_each_online_cpu(cpu)
4696                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
4697
4698                show_node(zone);
4699                printk(KERN_CONT
4700                        "%s"
4701                        " free:%lukB"
4702                        " min:%lukB"
4703                        " low:%lukB"
4704                        " high:%lukB"
4705                        " active_anon:%lukB"
4706                        " inactive_anon:%lukB"
4707                        " active_file:%lukB"
4708                        " inactive_file:%lukB"
4709                        " unevictable:%lukB"
4710                        " writepending:%lukB"
4711                        " present:%lukB"
4712                        " managed:%lukB"
4713                        " mlocked:%lukB"
4714                        " kernel_stack:%lukB"
4715                        " pagetables:%lukB"
4716                        " bounce:%lukB"
4717                        " free_pcp:%lukB"
4718                        " local_pcp:%ukB"
4719                        " free_cma:%lukB"
4720                        "\n",
4721                        zone->name,
4722                        K(zone_page_state(zone, NR_FREE_PAGES)),
4723                        K(min_wmark_pages(zone)),
4724                        K(low_wmark_pages(zone)),
4725                        K(high_wmark_pages(zone)),
4726                        K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
4727                        K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
4728                        K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
4729                        K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
4730                        K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
4731                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
4732                        K(zone->present_pages),
4733                        K(zone->managed_pages),
4734                        K(zone_page_state(zone, NR_MLOCK)),
4735                        zone_page_state(zone, NR_KERNEL_STACK_KB),
4736                        K(zone_page_state(zone, NR_PAGETABLE)),
4737                        K(zone_page_state(zone, NR_BOUNCE)),
4738                        K(free_pcp),
4739                        K(this_cpu_read(zone->pageset->pcp.count)),
4740                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
4741                printk("lowmem_reserve[]:");
4742                for (i = 0; i < MAX_NR_ZONES; i++)
4743                        printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
4744                printk(KERN_CONT "\n");
4745        }
4746
4747        for_each_populated_zone(zone) {
4748                unsigned int order;
4749                unsigned long nr[MAX_ORDER], flags, total = 0;
4750                unsigned char types[MAX_ORDER];
4751
4752                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
4753                        continue;
4754                show_node(zone);
4755                printk(KERN_CONT "%s: ", zone->name);
4756
4757                spin_lock_irqsave(&zone->lock, flags);
4758                for (order = 0; order < MAX_ORDER; order++) {
4759                        struct free_area *area = &zone->free_area[order];
4760                        int type;
4761
4762                        nr[order] = area->nr_free;
4763                        total += nr[order] << order;
4764
4765                        types[order] = 0;
4766                        for (type = 0; type < MIGRATE_TYPES; type++) {
4767                                if (!list_empty(&area->free_list[type]))
4768                                        types[order] |= 1 << type;
4769                        }
4770                }
4771                spin_unlock_irqrestore(&zone->lock, flags);
4772                for (order = 0; order < MAX_ORDER; order++) {
4773                        printk(KERN_CONT "%lu*%lukB ",
4774                               nr[order], K(1UL) << order);
4775                        if (nr[order])
4776                                show_migration_types(types[order]);
4777                }
4778                printk(KERN_CONT "= %lukB\n", K(total));
4779        }
4780
4781        hugetlb_show_meminfo();
4782
4783        printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
4784
4785        show_swap_cache_info();
4786}
4787
4788static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4789{
4790        zoneref->zone = zone;
4791        zoneref->zone_idx = zone_idx(zone);
4792}
4793
4794/*
4795 * Builds allocation fallback zone lists.
4796 *
4797 * Add all populated zones of a node to the zonelist.
4798 */
4799static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
4800                                int nr_zones)
4801{
4802        struct zone *zone;
4803        enum zone_type zone_type = MAX_NR_ZONES;
4804
4805        do {
4806                zone_type--;
4807                zone = pgdat->node_zones + zone_type;
4808                if (managed_zone(zone)) {
4809                        zoneref_set_zone(zone,
4810                                &zonelist->_zonerefs[nr_zones++]);
4811                        check_highest_zone(zone_type);
4812                }
4813        } while (zone_type);
4814
4815        return nr_zones;
4816}
4817
4818
4819/*
4820 *  zonelist_order:
4821 *  0 = automatic detection of better ordering.
4822 *  1 = order by ([node] distance, -zonetype)
4823 *  2 = order by (-zonetype, [node] distance)
4824 *
4825 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
4826 *  the same zonelist. So only NUMA can configure this param.
4827 */
4828#define ZONELIST_ORDER_DEFAULT  0
4829#define ZONELIST_ORDER_NODE     1
4830#define ZONELIST_ORDER_ZONE     2
4831
4832/* zonelist order in the kernel.
4833 * set_zonelist_order() will set this to NODE or ZONE.
4834 */
4835static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
4836static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
4837
4838
4839#ifdef CONFIG_NUMA
4840/* The value the user specified (set via boot parameter or sysctl) */
4841static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4842/* string for sysctl */
4843#define NUMA_ZONELIST_ORDER_LEN 16
4844char numa_zonelist_order[16] = "default";
4845
4846/*
4847 * interface to configure zonelist ordering.
4848 * command line option "numa_zonelist_order"
4849 *      = "[dD]efault"  - default, automatic configuration.
4850 *      = "[nN]ode"     - order by node locality, then by zone within node
4851 *      = "[zZ]one"     - order by zone, then by locality within zone
4852 */
4853
4854static int __parse_numa_zonelist_order(char *s)
4855{
4856        if (*s == 'd' || *s == 'D') {
4857                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
4858        } else if (*s == 'n' || *s == 'N') {
4859                user_zonelist_order = ZONELIST_ORDER_NODE;
4860        } else if (*s == 'z' || *s == 'Z') {
4861                user_zonelist_order = ZONELIST_ORDER_ZONE;
4862        } else {
4863                pr_warn("Ignoring invalid numa_zonelist_order value:  %s\n", s);
4864                return -EINVAL;
4865        }
4866        return 0;
4867}
4868
4869static __init int setup_numa_zonelist_order(char *s)
4870{
4871        int ret;
4872
4873        if (!s)
4874                return 0;
4875
4876        ret = __parse_numa_zonelist_order(s);
4877        if (ret == 0)
4878                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
4879
4880        return ret;
4881}
4882early_param("numa_zonelist_order", setup_numa_zonelist_order);
4883
4884/*
4885 * sysctl handler for numa_zonelist_order
4886 */
4887int numa_zonelist_order_handler(struct ctl_table *table, int write,
4888                void __user *buffer, size_t *length,
4889                loff_t *ppos)
4890{
4891        char saved_string[NUMA_ZONELIST_ORDER_LEN];
4892        int ret;
4893        static DEFINE_MUTEX(zl_order_mutex);
4894
4895        mutex_lock(&zl_order_mutex);
4896        if (write) {
4897                if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
4898                        ret = -EINVAL;
4899                        goto out;
4900                }
4901                strcpy(saved_string, (char *)table->data);
4902        }
4903        ret = proc_dostring(table, write, buffer, length, ppos);
4904        if (ret)
4905                goto out;
4906        if (write) {
4907                int oldval = user_zonelist_order;
4908
4909                ret = __parse_numa_zonelist_order((char *)table->data);
4910                if (ret) {
4911                        /*
4912                         * bogus value.  restore saved string
4913                         */
4914                        strncpy((char *)table->data, saved_string,
4915                                NUMA_ZONELIST_ORDER_LEN);
4916                        user_zonelist_order = oldval;
4917                } else if (oldval != user_zonelist_order) {
4918                        mem_hotplug_begin();
4919                        mutex_lock(&zonelists_mutex);
4920                        build_all_zonelists(NULL, NULL);
4921                        mutex_unlock(&zonelists_mutex);
4922                        mem_hotplug_done();
4923                }
4924        }
4925out:
4926        mutex_unlock(&zl_order_mutex);
4927        return ret;
4928}
4929
4930
4931#define MAX_NODE_LOAD (nr_online_nodes)
4932static int node_load[MAX_NUMNODES];
4933
4934/**
4935 * find_next_best_node - find the next node that should appear in a given node's fallback list
4936 * @node: node whose fallback list we're appending
4937 * @used_node_mask: nodemask_t of already used nodes
4938 *
4939 * We use a number of factors to determine which is the next node that should
4940 * appear on a given node's fallback list.  The node should not have appeared
4941 * already in @node's fallback list, and it should be the next closest node
4942 * according to the distance array (which contains arbitrary distance values
4943 * from each node to each node in the system), and should also prefer nodes
4944 * with no CPUs, since presumably they'll have very little allocation pressure
4945 * on them otherwise.
4946 * It returns -1 if no node is found.
4947 */
4948static int find_next_best_node(int node, nodemask_t *used_node_mask)
4949{
4950        int n, val;
4951        int min_val = INT_MAX;
4952        int best_node = NUMA_NO_NODE;
4953        const struct cpumask *tmp = cpumask_of_node(0);
4954
4955        /* Use the local node if we haven't already */
4956        if (!node_isset(node, *used_node_mask)) {
4957                node_set(node, *used_node_mask);
4958                return node;
4959        }
4960
4961        for_each_node_state(n, N_MEMORY) {
4962
4963                /* Don't want a node to appear more than once */
4964                if (node_isset(n, *used_node_mask))
4965                        continue;
4966
4967                /* Use the distance array to find the distance */
4968                val = node_distance(node, n);
4969
4970                /* Penalize nodes under us ("prefer the next node") */
4971                val += (n < node);
4972
4973                /* Give preference to headless and unused nodes */
4974                tmp = cpumask_of_node(n);
4975                if (!cpumask_empty(tmp))
4976                        val += PENALTY_FOR_NODE_WITH_CPUS;
4977
4978                /* Slight preference for less loaded node */
4979                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
4980                val += node_load[n];
4981
4982                if (val < min_val) {
4983                        min_val = val;
4984                        best_node = n;
4985                }
4986        }
4987
4988        if (best_node >= 0)
4989                node_set(best_node, *used_node_mask);
4990
4991        return best_node;
4992}
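
/*
 * Illustrative scoring under assumed distances: from node 0, a candidate
 * node 1 with CPUs at distance 20 scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES +
 * node_load[1], while a CPU-less node 2 at the same distance scores
 * 20 * MAX_NODE_LOAD * MAX_NUMNODES + node_load[2]; with equal node_load
 * the headless node wins and is appended to the fallback list first.
 */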
4993
4994
4995/*
4996 * Build zonelists ordered by node and zones within node.
4997 * This results in maximum locality--normal zone overflows into local
4998 * DMA zone, if any--but risks exhausting DMA zone.
4999 */
5000static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
5001{
5002        int j;
5003        struct zonelist *zonelist;
5004
5005        zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5006        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
5007                ;
5008        j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5009        zonelist->_zonerefs[j].zone = NULL;
5010        zonelist->_zonerefs[j].zone_idx = 0;
5011}
5012
5013/*
5014 * Build gfp_thisnode zonelists
5015 */
5016static void build_thisnode_zonelists(pg_data_t *pgdat)
5017{
5018        int j;
5019        struct zonelist *zonelist;
5020
5021        zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
5022        j = build_zonelists_node(pgdat, zonelist, 0);
5023        zonelist->_zonerefs[j].zone = NULL;
5024        zonelist->_zonerefs[j].zone_idx = 0;
5025}
5026
5027/*
5028 * Build zonelists ordered by zone and nodes within zones.
5029 * This results in conserving DMA zone[s] until all Normal memory is
5030 * exhausted, but results in overflowing to remote node while memory
5031 * may still exist in local DMA zone.
5032 */
5033static int node_order[MAX_NUMNODES];
5034
5035static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
5036{
5037        int pos, j, node;
5038        int zone_type;          /* needs to be signed */
5039        struct zone *z;
5040        struct zonelist *zonelist;
5041
5042        zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5043        pos = 0;
5044        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
5045                for (j = 0; j < nr_nodes; j++) {
5046                        node = node_order[j];
5047                        z = &NODE_DATA(node)->node_zones[zone_type];
5048                        if (managed_zone(z)) {
5049                                zoneref_set_zone(z,
5050                                        &zonelist->_zonerefs[pos++]);
5051                                check_highest_zone(zone_type);
5052                        }
5053                }
5054        }
5055        zonelist->_zonerefs[pos].zone = NULL;
5056        zonelist->_zonerefs[pos].zone_idx = 0;
5057}
5058
5059#if defined(CONFIG_64BIT)
5060/*
5061 * Devices that require DMA32/DMA are relatively rare and do not justify a
5062 * penalty to every machine in case the specialised case applies. Default
5063 * to Node-ordering on 64-bit NUMA machines
5064 */
5065static int default_zonelist_order(void)
5066{
5067        return ZONELIST_ORDER_NODE;
5068}
5069#else
5070/*
5071 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
5072 * by the kernel. If processes running on node 0 deplete the low memory zone
5073 * then reclaim will occur more frequently, increasing stalls and potentially
5074 * be easier to OOM if a large percentage of the zone is under writeback or
5075 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
5076 * Hence, default to zone ordering on 32-bit.
5077 */
5078static int default_zonelist_order(void)
5079{
5080        return ZONELIST_ORDER_ZONE;
5081}
5082#endif /* CONFIG_64BIT */
5083
5084static void set_zonelist_order(void)
5085{
5086        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
5087                current_zonelist_order = default_zonelist_order();
5088        else
5089                current_zonelist_order = user_zonelist_order;
5090}
5091
5092static void build_zonelists(pg_data_t *pgdat)
5093{
5094        int i, node, load;
5095        nodemask_t used_mask;
5096        int local_node, prev_node;
5097        struct zonelist *zonelist;
5098        unsigned int order = current_zonelist_order;
5099
5100        /* initialize zonelists */
5101        for (i = 0; i < MAX_ZONELISTS; i++) {
5102                zonelist = pgdat->node_zonelists + i;
5103                zonelist->_zonerefs[0].zone = NULL;
5104                zonelist->_zonerefs[0].zone_idx = 0;
5105        }
5106
5107        /* NUMA-aware ordering of nodes */
5108        local_node = pgdat->node_id;
5109        load = nr_online_nodes;
5110        prev_node = local_node;
5111        nodes_clear(used_mask);
5112
5113        memset(node_order, 0, sizeof(node_order));
5114        i = 0;
5115
5116        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5117                /*
5118                 * We don't want to pressure a particular node.
5119                 * So we add a penalty to the first node in the same
5120                 * distance group to make the selection round-robin.
5121                 */
5122                if (node_distance(local_node, node) !=
5123                    node_distance(local_node, prev_node))
5124                        node_load[node] = load;
5125
5126                prev_node = node;
5127                load--;
5128                if (order == ZONELIST_ORDER_NODE)
5129                        build_zonelists_in_node_order(pgdat, node);
5130                else
5131                        node_order[i++] = node; /* remember order */
5132        }
5133
5134        if (order == ZONELIST_ORDER_ZONE) {
5135                /* calculate node order -- i.e., DMA last! */
5136                build_zonelists_in_zone_order(pgdat, i);
5137        }
5138
5139        build_thisnode_zonelists(pgdat);
5140}
5141
5142#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5143/*
5144 * Return node id of node used for "local" allocations.
5145 * I.e., first node id of first zone in arg node's generic zonelist.
5146 * Used for initializing percpu 'numa_mem', which is used primarily
5147 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5148 */
5149int local_memory_node(int node)
5150{
5151        struct zoneref *z;
5152
5153        z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5154                                   gfp_zone(GFP_KERNEL),
5155                                   NULL);
5156        return z->zone->node;
5157}
5158#endif
5159
5160static void setup_min_unmapped_ratio(void);
5161static void setup_min_slab_ratio(void);
5162#else   /* CONFIG_NUMA */
5163
5164static void set_zonelist_order(void)
5165{
5166        current_zonelist_order = ZONELIST_ORDER_ZONE;
5167}
5168
5169static void build_zonelists(pg_data_t *pgdat)
5170{
5171        int node, local_node;
5172        enum zone_type j;
5173        struct zonelist *zonelist;
5174
5175        local_node = pgdat->node_id;
5176
5177        zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
5178        j = build_zonelists_node(pgdat, zonelist, 0);
5179
5180        /*
5181         * Now we build the zonelist so that it contains the zones
5182         * of all the other nodes.
5183         * We don't want to pressure a particular node, so when
5184         * building the zones for node N, we make sure that the
5185         * zones coming right after the local ones are those from
5186         * node N+1 (modulo N)
5187         */
5188        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5189                if (!node_online(node))
5190                        continue;
5191                j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5192        }
5193        for (node = 0; node < local_node; node++) {
5194                if (!node_online(node))
5195                        continue;
5196                j = build_zonelists_node(NODE_DATA(node), zonelist, j);
5197        }
5198
5199        zonelist->_zonerefs[j].zone = NULL;
5200        zonelist->_zonerefs[j].zone_idx = 0;
5201}
5202
5203#endif  /* CONFIG_NUMA */
5204
5205/*
5206 * Boot pageset table. One per cpu which is going to be used for all
5207 * zones and all nodes. The parameters will be set in such a way
5208 * that an item put on a list will immediately be handed over to
5209 * the buddy list. This is safe since pageset manipulation is done
5210 * with interrupts disabled.
5211 *
5212 * The boot_pagesets must be kept even after bootup is complete for
5213 * unused processors and/or zones. They do play a role for bootstrapping
5214 * hotplugged processors.
5215 *
5216 * zoneinfo_show() and maybe other functions do
5217 * not check if the processor is online before following the pageset pointer.
5218 * Other parts of the kernel may not check if the zone is available.
5219 */
5220static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5221static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5222static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5223static void setup_zone_pageset(struct zone *zone);
5224
5225/*
5226 * Global mutex to protect against size modification of zonelists
5227 * as well as to serialize pageset setup for the new populated zone.
5228 */
5229DEFINE_MUTEX(zonelists_mutex);
5230
5231/* The return type is int only to match the stop_machine() callback */
5232static int __build_all_zonelists(void *data)
5233{
5234        int nid;
5235        int cpu;
5236        pg_data_t *self = data;
5237
5238#ifdef CONFIG_NUMA
5239        memset(node_load, 0, sizeof(node_load));
5240#endif
5241
5242        if (self && !node_online(self->node_id)) {
5243                build_zonelists(self);
5244        }
5245
5246        for_each_online_node(nid) {
5247                pg_data_t *pgdat = NODE_DATA(nid);
5248
5249                build_zonelists(pgdat);
5250        }
5251
5252        /*
5253         * Initialize the boot_pagesets that are going to be used
5254         * for bootstrapping processors. The real pagesets for
5255         * each zone will be allocated later when the per cpu
5256         * allocator is available.
5257         *
5258         * boot_pagesets are used also for bootstrapping offline
5259         * cpus if the system is already booted because the pagesets
5260         * are needed to initialize allocators on a specific cpu too.
5261         * F.e. the percpu allocator needs the page allocator which
5262         * needs the percpu allocator in order to allocate its pagesets
5263         * (a chicken-egg dilemma).
5264         */
5265        for_each_possible_cpu(cpu) {
5266                setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5267
5268#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5269                /*
5270                 * We now know the "local memory node" for each node--
5271                 * i.e., the node of the first zone in the generic zonelist.
5272                 * Set up numa_mem percpu variable for on-line cpus.  During
5273                 * boot, only the boot cpu should be on-line;  we'll init the
5274                 * secondary cpus' numa_mem as they come on-line.  During
5275                 * node/memory hotplug, we'll fixup all on-line cpus.
5276                 */
5277                if (cpu_online(cpu))
5278                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5279#endif
5280        }
5281
5282        return 0;
5283}
5284
5285static noinline void __init
5286build_all_zonelists_init(void)
5287{
5288        __build_all_zonelists(NULL);
5289        mminit_verify_zonelist();
5290        cpuset_init_current_mems_allowed();
5291}
5292
5293/*
5294 * Called with zonelists_mutex held always
5295 * unless system_state == SYSTEM_BOOTING.
5296 *
5297 * __ref due to (1) call of __meminit annotated setup_zone_pageset
5298 * [we're only called with non-NULL zone through __meminit paths] and
5299 * (2) call of __init annotated helper build_all_zonelists_init
5300 * [protected by SYSTEM_BOOTING].
5301 */
5302void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
5303{
5304        set_zonelist_order();
5305
5306        if (system_state == SYSTEM_BOOTING) {
5307                build_all_zonelists_init();
5308        } else {
5309#ifdef CONFIG_MEMORY_HOTPLUG
5310                if (zone)
5311                        setup_zone_pageset(zone);
5312#endif
5313                /* we have to stop all cpus to guarantee there is no user
5314                   of zonelist */
5315                stop_machine_cpuslocked(__build_all_zonelists, pgdat, NULL);
5316                /* cpuset refresh routine should be here */
5317        }
5318        vm_total_pages = nr_free_pagecache_pages();
5319        /*
5320         * Disable grouping by mobility if the number of pages in the
5321         * system is too low to allow the mechanism to work. It would be
5322         * more accurate, but expensive to check per-zone. This check is
5323         * made on memory-hotadd so a system can start with mobility
5324         * disabled and enable it later
5325         */
5326        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5327                page_group_by_mobility_disabled = 1;
5328        else
5329                page_group_by_mobility_disabled = 0;
5330
5331        pr_info("Built %i zonelists in %s order, mobility grouping %s.  Total pages: %ld\n",
5332                nr_online_nodes,
5333                zonelist_order_name[current_zonelist_order],
5334                page_group_by_mobility_disabled ? "off" : "on",
5335                vm_total_pages);
5336#ifdef CONFIG_NUMA
5337        pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5338#endif
5339}
5340
5341/*
5342 * Initially all pages are reserved - free ones are freed
5343 * up by free_all_bootmem() once the early boot process is
5344 * done. Non-atomic initialization, single-pass.
5345 */
5346void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5347                unsigned long start_pfn, enum memmap_context context)
5348{
5349        struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
5350        unsigned long end_pfn = start_pfn + size;
5351        pg_data_t *pgdat = NODE_DATA(nid);
5352        unsigned long pfn;
5353        unsigned long nr_initialised = 0;
5354#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5355        struct memblock_region *r = NULL, *tmp;
5356#endif
5357
5358        if (highest_memmap_pfn < end_pfn - 1)
5359                highest_memmap_pfn = end_pfn - 1;
5360
5361        /*
5362         * Honor reservation requested by the driver for this ZONE_DEVICE
5363         * memory
5364         */
5365        if (altmap && start_pfn == altmap->base_pfn)
5366                start_pfn += altmap->reserve;
5367
5368        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5369                /*
5370                 * There can be holes in boot-time mem_map[]s handed to this
5371                 * function.  They do not exist on hotplugged memory.
5372                 */
5373                if (context != MEMMAP_EARLY)
5374                        goto not_early;
5375
5376                if (!early_pfn_valid(pfn)) {
5377#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5378                        /*
5379                         * Skip to the pfn preceding the next valid one (or
5380                         * end_pfn), such that we hit a valid pfn (or end_pfn)
5381                         * on our next iteration of the loop.
5382                         */
5383                        pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
5384#endif
5385                        continue;
5386                }
5387                if (!early_pfn_in_nid(pfn, nid))
5388                        continue;
5389                if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
5390                        break;
5391
5392#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5393                /*
5394                 * Check the memblock attributes set by firmware, which can
5395                 * affect kernel memory layout.  If zone==ZONE_MOVABLE but the
5396                 * memory is mirrored, it's an overlapped memmap init; skip it.
5397                 */
5398                if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5399                        if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
5400                                for_each_memblock(memory, tmp)
5401                                        if (pfn < memblock_region_memory_end_pfn(tmp))
5402                                                break;
5403                                r = tmp;
5404                        }
5405                        if (pfn >= memblock_region_memory_base_pfn(r) &&
5406                            memblock_is_mirror(r)) {
5407                                /* already initialized as NORMAL */
5408                                pfn = memblock_region_memory_end_pfn(r);
5409                                continue;
5410                        }
5411                }
5412#endif
5413
5414not_early:
5415                /*
5416                 * Mark the block movable so that blocks are reserved for
5417                 * movable at startup. This will force kernel allocations
5418                 * to reserve their blocks rather than leaking throughout
5419                 * the address space during boot when many long-lived
5420                 * kernel allocations are made.
5421                 *
5422                 * The pageblock bitmap is created for the zone's valid pfn
5423                 * range, but the memmap can be created for invalid pages
5424                 * (for alignment); check here so we do not call
5425                 * set_pageblock_migratetype() against a pfn outside the zone.
5426                 */
5427                if (!(pfn & (pageblock_nr_pages - 1))) {
5428                        struct page *page = pfn_to_page(pfn);
5429
5430                        __init_single_page(page, pfn, zone, nid);
5431                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5432                } else {
5433                        __init_single_pfn(pfn, zone, nid);
5434                }
5435        }
5436}
5437
5438static void __meminit zone_init_free_lists(struct zone *zone)
5439{
5440        unsigned int order, t;
5441        for_each_migratetype_order(order, t) {
5442                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5443                zone->free_area[order].nr_free = 0;
5444        }
5445}
5446
5447#ifndef __HAVE_ARCH_MEMMAP_INIT
5448#define memmap_init(size, nid, zone, start_pfn) \
5449        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
5450#endif
5451
5452static int zone_batchsize(struct zone *zone)
5453{
5454#ifdef CONFIG_MMU
5455        int batch;
5456
5457        /*
5458         * The per-cpu-pages pools are set to around 1/1000th of the
5459         * size of the zone.  But no more than 1/2 of a meg.
5460         *
5461         * OK, so we don't know how big the cache is.  So guess.
5462         */
5463        batch = zone->managed_pages / 1024;
5464        if (batch * PAGE_SIZE > 512 * 1024)
5465                batch = (512 * 1024) / PAGE_SIZE;
5466        batch /= 4;             /* We effectively *= 4 below */
5467        if (batch < 1)
5468                batch = 1;
5469
5470        /*
5471         * Clamp the batch to a 2^n - 1 value. Having a power
5472         * of 2 value was found to be more likely to have
5473         * suboptimal cache aliasing properties in some cases.
5474         *
5475         * For example if 2 tasks are alternately allocating
5476         * batches of pages, one task can end up with a lot
5477         * of pages of one half of the possible page colors
5478         * and the other with pages of the other colors.
5479         */
5480        batch = rounddown_pow_of_two(batch + batch/2) - 1;
5481
5482        return batch;
5483
5484#else
5485        /* The deferral and batching of frees should be suppressed under NOMMU
5486         * conditions.
5487         *
5488         * The problem is that NOMMU needs to be able to allocate large chunks
5489         * of contiguous memory as there's no hardware page translation to
5490         * assemble apparent contiguous memory from discontiguous pages.
5491         *
5492         * Queueing large contiguous runs of pages for batching, however,
5493         * causes the pages to actually be freed in smaller chunks.  As there
5494         * can be a significant delay between the individual batches being
5495         * recycled, this leads to the once large chunks of space being
5496         * fragmented and becoming unavailable for high-order allocations.
5497         */
5498        return 0;
5499#endif
5500}
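
/*
 * Worked example of the sizing above (4 KiB pages assumed): a 4 GiB zone
 * has ~1048576 managed pages, so batch starts at 1024, is clamped to
 * 512 KiB worth of pages (128), quartered to 32 and finally rounded to
 * a 2^n - 1 value, giving a per-cpu batch of 31 pages.
 */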
5501
5502/*
5503 * pcp->high and pcp->batch values are related and dependent on one another:
5504 * ->batch must never be higher than ->high.
5505 * The following function updates them in a safe manner without read side
5506 * locking.
5507 *
5508 * Any new users of pcp->batch and pcp->high should ensure they can cope with
5509 * those fields changing asynchronously (according to the above rule).
5510 *
5511 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5512 * outside of boot time (or some other assurance that no concurrent updaters
5513 * exist).
5514 */
5515static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5516                unsigned long batch)
5517{
5518       /* start with a fail safe value for batch */
5519        pcp->batch = 1;
5520        smp_wmb();
5521
5522       /* Update high, then batch, in order */
5523        pcp->high = high;
5524        smp_wmb();
5525
5526        pcp->batch = batch;
5527}
5528
5529/* a companion to pageset_set_high() */
5530static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5531{
5532        pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5533}
5534
5535static void pageset_init(struct per_cpu_pageset *p)
5536{
5537        struct per_cpu_pages *pcp;
5538        int migratetype;
5539
5540        memset(p, 0, sizeof(*p));
5541
5542        pcp = &p->pcp;
5543        pcp->count = 0;
5544        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5545                INIT_LIST_HEAD(&pcp->lists[migratetype]);
5546}
5547
5548static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5549{
5550        pageset_init(p);
5551        pageset_set_batch(p, batch);
5552}
5553
5554/*
5555 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
5556 * to the value high for the pageset p.
5557 */
5558static void pageset_set_high(struct per_cpu_pageset *p,
5559                                unsigned long high)
5560{
5561        unsigned long batch = max(1UL, high / 4);
5562        if ((high / 4) > (PAGE_SHIFT * 8))
5563                batch = PAGE_SHIFT * 8;
5564
5565        pageset_update(&p->pcp, high, batch);
5566}
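
/*
 * For example, with 4 KiB pages (PAGE_SHIFT == 12) a requested high of
 * 1000 pages gives high / 4 == 250, which exceeds the PAGE_SHIFT * 8 == 96
 * cap, so the batch is limited to 96 pages.
 */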
5567
5568static void pageset_set_high_and_batch(struct zone *zone,
5569                                       struct per_cpu_pageset *pcp)
5570{
5571        if (percpu_pagelist_fraction)
5572                pageset_set_high(pcp,
5573                        (zone->managed_pages /
5574                                percpu_pagelist_fraction));
5575        else
5576                pageset_set_batch(pcp, zone_batchsize(zone));
5577}
5578
5579static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5580{
5581        struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5582
5583        pageset_init(pcp);
5584        pageset_set_high_and_batch(zone, pcp);
5585}
5586
5587static void __meminit setup_zone_pageset(struct zone *zone)
5588{
5589        int cpu;
5590        zone->pageset = alloc_percpu(struct per_cpu_pageset);
5591        for_each_possible_cpu(cpu)
5592                zone_pageset_init(zone, cpu);
5593}
5594
5595/*
5596 * Allocate per cpu pagesets and initialize them.
5597 * Before this call only boot pagesets were available.
5598 */
5599void __init setup_per_cpu_pageset(void)
5600{
5601        struct pglist_data *pgdat;
5602        struct zone *zone;
5603
5604        for_each_populated_zone(zone)
5605                setup_zone_pageset(zone);
5606
5607        for_each_online_pgdat(pgdat)
5608                pgdat->per_cpu_nodestats =
5609                        alloc_percpu(struct per_cpu_nodestat);
5610}
5611
5612static __meminit void zone_pcp_init(struct zone *zone)
5613{
5614        /*
5615         * per cpu subsystem is not up at this point. The following code
5616         * relies on the ability of the linker to provide the
5617         * offset of a (static) per cpu variable into the per cpu area.
5618         */
5619        zone->pageset = &boot_pageset;
5620
5621        if (populated_zone(zone))
5622                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
5623                        zone->name, zone->present_pages,
5624                                         zone_batchsize(zone));
5625}
5626
5627void __meminit init_currently_empty_zone(struct zone *zone,
5628                                        unsigned long zone_start_pfn,
5629                                        unsigned long size)
5630{
5631        struct pglist_data *pgdat = zone->zone_pgdat;
5632
5633        pgdat->nr_zones = zone_idx(zone) + 1;
5634
5635        zone->zone_start_pfn = zone_start_pfn;
5636
5637        mminit_dprintk(MMINIT_TRACE, "memmap_init",
5638                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5639                        pgdat->node_id,
5640                        (unsigned long)zone_idx(zone),
5641                        zone_start_pfn, (zone_start_pfn + size));
5642
5643        zone_init_free_lists(zone);
5644        zone->initialized = 1;
5645}
5646
5647#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5648#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
5649
5650/*
5651 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
5652 */
5653int __meminit __early_pfn_to_nid(unsigned long pfn,
5654                                        struct mminit_pfnnid_cache *state)
5655{
5656        unsigned long start_pfn, end_pfn;
5657        int nid;
5658
5659        if (state->last_start <= pfn && pfn < state->last_end)
5660                return state->last_nid;
5661
5662        nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
5663        if (nid != -1) {
5664                state->last_start = start_pfn;
5665                state->last_end = end_pfn;
5666                state->last_nid = nid;
5667        }
5668
5669        return nid;
5670}
5671#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
5672
5673/**
5674 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
5675 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
5676 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
5677 *
5678 * If an architecture guarantees that all ranges registered contain no holes
5679 * and may be freed, this function may be used instead of calling
5680 * memblock_free_early_nid() manually.
5681 */
5682void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
5683{
5684        unsigned long start_pfn, end_pfn;
5685        int i, this_nid;
5686
5687        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
5688                start_pfn = min(start_pfn, max_low_pfn);
5689                end_pfn = min(end_pfn, max_low_pfn);
5690
5691                if (start_pfn < end_pfn)
5692                        memblock_free_early_nid(PFN_PHYS(start_pfn),
5693                                        (end_pfn - start_pfn) << PAGE_SHIFT,
5694                                        this_nid);
5695        }
5696}
5697
5698/**
5699 * sparse_memory_present_with_active_regions - Call memory_present for each active range
5700 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
5701 *
5702 * If an architecture guarantees that all ranges registered contain no holes and may
5703 * be freed, this function may be used instead of calling memory_present() manually.
5704 */
5705void __init sparse_memory_present_with_active_regions(int nid)
5706{
5707        unsigned long start_pfn, end_pfn;
5708        int i, this_nid;
5709
5710        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
5711                memory_present(this_nid, start_pfn, end_pfn);
5712}
5713
5714/**
5715 * get_pfn_range_for_nid - Return the start and end page frames for a node
5716 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
5717 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
5718 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
5719 *
5720 * It returns the start and end page frame of a node based on information
5721 * provided by memblock_set_node(). If called for a node
5722 * with no available memory, a warning is printed and the start and end
5723 * PFNs will be 0.
5724 */
5725void __meminit get_pfn_range_for_nid(unsigned int nid,
5726                        unsigned long *start_pfn, unsigned long *end_pfn)
5727{
5728        unsigned long this_start_pfn, this_end_pfn;
5729        int i;
5730
5731        *start_pfn = -1UL;
5732        *end_pfn = 0;
5733
5734        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
5735                *start_pfn = min(*start_pfn, this_start_pfn);
5736                *end_pfn = max(*end_pfn, this_end_pfn);
5737        }
5738
5739        if (*start_pfn == -1UL)
5740                *start_pfn = 0;
5741}
5742
5743/*
5744 * This finds a zone that can be used for ZONE_MOVABLE pages. The
5745 * assumption is made that zones within a node are ordered by monotonically
5746 * increasing memory addresses so that the "highest" populated zone is used.
5747 */
5748static void __init find_usable_zone_for_movable(void)
5749{
5750        int zone_index;
5751        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
5752                if (zone_index == ZONE_MOVABLE)
5753                        continue;
5754
5755                if (arch_zone_highest_possible_pfn[zone_index] >
5756                                arch_zone_lowest_possible_pfn[zone_index])
5757                        break;
5758        }
5759
5760        VM_BUG_ON(zone_index == -1);
5761        movable_zone = zone_index;
5762}
5763
5764/*
5765 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
5766 * because it is sized independently of the architecture. Unlike the other zones,
5767 * the starting point for ZONE_MOVABLE is not fixed. It may be different
5768 * in each node depending on the size of each node and how evenly kernelcore
5769 * is distributed. This helper function adjusts the zone ranges
5770 * provided by the architecture for a given node by using the end of the
5771 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
5772 * zones within a node are in order of monotonically increasing memory addresses.
5773 */
5774static void __meminit adjust_zone_range_for_zone_movable(int nid,
5775                                        unsigned long zone_type,
5776                                        unsigned long node_start_pfn,
5777                                        unsigned long node_end_pfn,
5778                                        unsigned long *zone_start_pfn,
5779                                        unsigned long *zone_end_pfn)
5780{
5781        /* Only adjust if ZONE_MOVABLE is on this node */
5782        if (zone_movable_pfn[nid]) {
5783                /* Size ZONE_MOVABLE */
5784                if (zone_type == ZONE_MOVABLE) {
5785                        *zone_start_pfn = zone_movable_pfn[nid];
5786                        *zone_end_pfn = min(node_end_pfn,
5787                                arch_zone_highest_possible_pfn[movable_zone]);
5788
5789                /* Adjust for ZONE_MOVABLE starting within this range */
5790                } else if (!mirrored_kernelcore &&
5791                        *zone_start_pfn < zone_movable_pfn[nid] &&
5792                        *zone_end_pfn > zone_movable_pfn[nid]) {
5793                        *zone_end_pfn = zone_movable_pfn[nid];
5794
5795                /* Check if this whole range is within ZONE_MOVABLE */
5796                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5797                        *zone_start_pfn = *zone_end_pfn;
5798        }
5799}
5800
5801/*
5802 * Return the number of pages a zone spans in a node, including holes
5803 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5804 */
5805static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5806                                        unsigned long zone_type,
5807                                        unsigned long node_start_pfn,
5808                                        unsigned long node_end_pfn,
5809                                        unsigned long *zone_start_pfn,
5810                                        unsigned long *zone_end_pfn,
5811                                        unsigned long *ignored)
5812{
5813        /* When hot-adding a new node from cpu_up(), the node should be empty */
5814        if (!node_start_pfn && !node_end_pfn)
5815                return 0;
5816
5817        /* Get the start and end of the zone */
5818        *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5819        *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5820        adjust_zone_range_for_zone_movable(nid, zone_type,
5821                                node_start_pfn, node_end_pfn,
5822                                zone_start_pfn, zone_end_pfn);
5823
5824        /* Check that this node has pages within the zone's required range */
5825        if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5826                return 0;
5827
5828        /* Move the zone boundaries inside the node if necessary */
5829        *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5830        *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5831
5832        /* Return the spanned pages */
5833        return *zone_end_pfn - *zone_start_pfn;
5834}
5835
5836/*
5837 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5838 * then all holes in the requested range will be accounted for.
5839 */
5840unsigned long __meminit __absent_pages_in_range(int nid,
5841                                unsigned long range_start_pfn,
5842                                unsigned long range_end_pfn)
5843{
5844        unsigned long nr_absent = range_end_pfn - range_start_pfn;
5845        unsigned long start_pfn, end_pfn;
5846        int i;
5847
5848        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5849                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5850                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5851                nr_absent -= end_pfn - start_pfn;
5852        }
5853        return nr_absent;
5854}
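
/*
 * Illustrative sketch, not part of the allocator: how the clamping above
 * turns memblock ranges into a hole count. All PFN values are hypothetical.
 */
static inline unsigned long absent_pages_example_sketch(void)
{
	/*
	 * Requested range: [100, 200) -> 100 PFNs.
	 * Memory ranges intersecting the node: [0, 120) and [150, 260).
	 * Clamped to the request they contribute 20 + 50 = 70 present PFNs,
	 * leaving 30 PFNs of holes.
	 */
	unsigned long nr_absent = 200 - 100;

	nr_absent -= clamp(120UL, 100UL, 200UL) - clamp(0UL, 100UL, 200UL);
	nr_absent -= clamp(260UL, 100UL, 200UL) - clamp(150UL, 100UL, 200UL);

	return nr_absent;	/* 30 */
}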
5855
5856/**
5857 * absent_pages_in_range - Return number of page frames in holes within a range
5858 * @start_pfn: The start PFN to start searching for holes
5859 * @end_pfn: The end PFN to stop searching for holes
5860 *
5861 * It returns the number of page frames in memory holes within a range.
5862 */
5863unsigned long __init absent_pages_in_range(unsigned long start_pfn,
5864                                                        unsigned long end_pfn)
5865{
5866        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
5867}
5868
5869/* Return the number of page frames in holes in a zone on a node */
5870static unsigned long __meminit zone_absent_pages_in_node(int nid,
5871                                        unsigned long zone_type,
5872                                        unsigned long node_start_pfn,
5873                                        unsigned long node_end_pfn,
5874                                        unsigned long *ignored)
5875{
5876        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
5877        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5878        unsigned long zone_start_pfn, zone_end_pfn;
5879        unsigned long nr_absent;
5880
5881        /* When hot-adding a new node from cpu_up(), the node should be empty */
5882        if (!node_start_pfn && !node_end_pfn)
5883                return 0;
5884
5885        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5886        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5887
5888        adjust_zone_range_for_zone_movable(nid, zone_type,
5889                        node_start_pfn, node_end_pfn,
5890                        &zone_start_pfn, &zone_end_pfn);
5891        nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
5892
5893        /*
5894         * ZONE_MOVABLE handling.
5895         * Treat pages that will end up in ZONE_MOVABLE but lie within
5896         * ZONE_NORMAL's range as absent pages, and vice versa.
5897         */
5898        if (mirrored_kernelcore && zone_movable_pfn[nid]) {
5899                unsigned long start_pfn, end_pfn;
5900                struct memblock_region *r;
5901
5902                for_each_memblock(memory, r) {
5903                        start_pfn = clamp(memblock_region_memory_base_pfn(r),
5904                                          zone_start_pfn, zone_end_pfn);
5905                        end_pfn = clamp(memblock_region_memory_end_pfn(r),
5906                                        zone_start_pfn, zone_end_pfn);
5907
5908                        if (zone_type == ZONE_MOVABLE &&
5909                            memblock_is_mirror(r))
5910                                nr_absent += end_pfn - start_pfn;
5911
5912                        if (zone_type == ZONE_NORMAL &&
5913                            !memblock_is_mirror(r))
5914                                nr_absent += end_pfn - start_pfn;
5915                }
5916        }
5917
5918        return nr_absent;
5919}
5920
5921#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5922static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
5923                                        unsigned long zone_type,
5924                                        unsigned long node_start_pfn,
5925                                        unsigned long node_end_pfn,
5926                                        unsigned long *zone_start_pfn,
5927                                        unsigned long *zone_end_pfn,
5928                                        unsigned long *zones_size)
5929{
5930        unsigned int zone;
5931
5932        *zone_start_pfn = node_start_pfn;
5933        for (zone = 0; zone < zone_type; zone++)
5934                *zone_start_pfn += zones_size[zone];
5935
5936        *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
5937
5938        return zones_size[zone_type];
5939}
5940
5941static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
5942                                                unsigned long zone_type,
5943                                                unsigned long node_start_pfn,
5944                                                unsigned long node_end_pfn,
5945                                                unsigned long *zholes_size)
5946{
5947        if (!zholes_size)
5948                return 0;
5949
5950        return zholes_size[zone_type];
5951}
5952
5953#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5954
5955static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5956                                                unsigned long node_start_pfn,
5957                                                unsigned long node_end_pfn,
5958                                                unsigned long *zones_size,
5959                                                unsigned long *zholes_size)
5960{
5961        unsigned long realtotalpages = 0, totalpages = 0;
5962        enum zone_type i;
5963
5964        for (i = 0; i < MAX_NR_ZONES; i++) {
5965                struct zone *zone = pgdat->node_zones + i;
5966                unsigned long zone_start_pfn, zone_end_pfn;
5967                unsigned long size, real_size;
5968
5969                size = zone_spanned_pages_in_node(pgdat->node_id, i,
5970                                                  node_start_pfn,
5971                                                  node_end_pfn,
5972                                                  &zone_start_pfn,
5973                                                  &zone_end_pfn,
5974                                                  zones_size);
5975                real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
5976                                                  node_start_pfn, node_end_pfn,
5977                                                  zholes_size);
5978                if (size)
5979                        zone->zone_start_pfn = zone_start_pfn;
5980                else
5981                        zone->zone_start_pfn = 0;
5982                zone->spanned_pages = size;
5983                zone->present_pages = real_size;
5984
5985                totalpages += size;
5986                realtotalpages += real_size;
5987        }
5988
5989        pgdat->node_spanned_pages = totalpages;
5990        pgdat->node_present_pages = realtotalpages;
5991        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
5992                                                        realtotalpages);
5993}
5994
5995#ifndef CONFIG_SPARSEMEM
5996/*
5997 * Calculate the size of the zone->pageblock_flags bitmap in bytes.
5998 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
5999 * rounding up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock,
6000 * round what is now in bits up to the nearest unsigned long, and return
6001 * the result in bytes.
6002 */
6003static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6004{
6005        unsigned long usemapsize;
6006
6007        zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6008        usemapsize = roundup(zonesize, pageblock_nr_pages);
6009        usemapsize = usemapsize >> pageblock_order;
6010        usemapsize *= NR_PAGEBLOCK_BITS;
6011        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6012
6013        return usemapsize / 8;
6014}
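
/*
 * Worked example of the calculation above (illustrative assumptions:
 * pageblock_order == 10, so pageblock_nr_pages == 1024, and
 * NR_PAGEBLOCK_BITS == 4). For a zone of 1,000,000 pages starting on a
 * pageblock boundary:
 *
 *	roundup(1000000, 1024)	= 1000448 pages
 *	1000448 >> 10		= 977 pageblocks
 *	977 * 4			= 3908 bits
 *	roundup(3908, 64)	= 3968 bits
 *	3968 / 8		= 496 bytes of pageblock flags
 */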
6015
6016static void __init setup_usemap(struct pglist_data *pgdat,
6017                                struct zone *zone,
6018                                unsigned long zone_start_pfn,
6019                                unsigned long zonesize)
6020{
6021        unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6022        zone->pageblock_flags = NULL;
6023        if (usemapsize)
6024                zone->pageblock_flags =
6025                        memblock_virt_alloc_node_nopanic(usemapsize,
6026                                                         pgdat->node_id);
6027}
6028#else
6029static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6030                                unsigned long zone_start_pfn, unsigned long zonesize) {}
6031#endif /* CONFIG_SPARSEMEM */
6032
6033#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6034
6035/* Initialise the number of pages each set of NR_PAGEBLOCK_BITS pageblock flags represents */
6036void __paginginit set_pageblock_order(void)
6037{
6038        unsigned int order;
6039
6040        /* Check that pageblock_order has not already been set up */
6041        if (pageblock_order)
6042                return;
6043
6044        if (HPAGE_SHIFT > PAGE_SHIFT)
6045                order = HUGETLB_PAGE_ORDER;
6046        else
6047                order = MAX_ORDER - 1;
6048
6049        /*
6050         * Assume the largest contiguous order of interest is a huge page.
6051         * This value may be variable depending on boot parameters on IA64 and
6052         * powerpc.
6053         */
6054        pageblock_order = order;
6055}
6056#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6057
6058/*
6059 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6060 * is unused as pageblock_order is set at compile-time. See
6061 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6062 * the kernel config
6063 */
6064void __paginginit set_pageblock_order(void)
6065{
6066}
6067
6068#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6069
6070static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
6071                                                   unsigned long present_pages)
6072{
6073        unsigned long pages = spanned_pages;
6074
6075        /*
6076         * Provide a more accurate estimation if there are holes within
6077         * the zone and SPARSEMEM is in use. If there are holes within the
6078         * zone, each populated memory region may cost us one or two extra
6079         * memmap pages due to alignment because the memmap pages for each
6080         * populated region may not be naturally aligned on a page boundary.
6081         * So the (present_pages >> 4) heuristic is a tradeoff for that.
6082         */
6083        if (spanned_pages > present_pages + (present_pages >> 4) &&
6084            IS_ENABLED(CONFIG_SPARSEMEM))
6085                pages = present_pages;
6086
6087        return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6088}
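
/*
 * Back-of-the-envelope view of the memmap cost computed above (assuming
 * 4KiB pages and a 64-byte struct page, which are typical but not
 * guaranteed): the memmap consumes sizeof(struct page) / PAGE_SIZE, about
 * 1/64 of the zone, so a 1,048,576-page (4GiB) zone pays roughly
 * 16,384 pages (64MiB) for its memmap.
 */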
6089
6090/*
6091 * Set up the zone data structures:
6092 *   - mark all pages reserved
6093 *   - mark all memory queues empty
6094 *   - clear the memory bitmaps
6095 *
6096 * NOTE: pgdat should get zeroed by caller.
6097 */
6098static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6099{
6100        enum zone_type j;
6101        int nid = pgdat->node_id;
6102
6103        pgdat_resize_init(pgdat);
6104#ifdef CONFIG_NUMA_BALANCING
6105        spin_lock_init(&pgdat->numabalancing_migrate_lock);
6106        pgdat->numabalancing_migrate_nr_pages = 0;
6107        pgdat->numabalancing_migrate_next_window = jiffies;
6108#endif
6109#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6110        spin_lock_init(&pgdat->split_queue_lock);
6111        INIT_LIST_HEAD(&pgdat->split_queue);
6112        pgdat->split_queue_len = 0;
6113#endif
6114        init_waitqueue_head(&pgdat->kswapd_wait);
6115        init_waitqueue_head(&pgdat->pfmemalloc_wait);
6116#ifdef CONFIG_COMPACTION
6117        init_waitqueue_head(&pgdat->kcompactd_wait);
6118#endif
6119        pgdat_page_ext_init(pgdat);
6120        spin_lock_init(&pgdat->lru_lock);
6121        lruvec_init(node_lruvec(pgdat));
6122
6123        pgdat->per_cpu_nodestats = &boot_nodestats;
6124
6125        for (j = 0; j < MAX_NR_ZONES; j++) {
6126                struct zone *zone = pgdat->node_zones + j;
6127                unsigned long size, realsize, freesize, memmap_pages;
6128                unsigned long zone_start_pfn = zone->zone_start_pfn;
6129
6130                size = zone->spanned_pages;
6131                realsize = freesize = zone->present_pages;
6132
6133                /*
6134                 * Adjust freesize so that it accounts for how much memory
6135                 * is used by this zone for memmap. This affects the watermark
6136                 * and per-cpu initialisations
6137                 */
6138                memmap_pages = calc_memmap_size(size, realsize);
6139                if (!is_highmem_idx(j)) {
6140                        if (freesize >= memmap_pages) {
6141                                freesize -= memmap_pages;
6142                                if (memmap_pages)
6143                                        printk(KERN_DEBUG
6144                                               "  %s zone: %lu pages used for memmap\n",
6145                                               zone_names[j], memmap_pages);
6146                        } else
6147                                pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6148                                        zone_names[j], memmap_pages, freesize);
6149                }
6150
6151                /* Account for reserved pages */
6152                if (j == 0 && freesize > dma_reserve) {
6153                        freesize -= dma_reserve;
6154                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6155                                        zone_names[0], dma_reserve);
6156                }
6157
6158                if (!is_highmem_idx(j))
6159                        nr_kernel_pages += freesize;
6160                /* Charge for highmem memmap if there are enough kernel pages */
6161                else if (nr_kernel_pages > memmap_pages * 2)
6162                        nr_kernel_pages -= memmap_pages;
6163                nr_all_pages += freesize;
6164
6165                /*
6166                 * Set an approximate value for lowmem here; it will be adjusted
6167                 * when the bootmem allocator frees pages into the buddy system.
6168                 * And all highmem pages will be managed by the buddy system.
6169                 */
6170                zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
6171#ifdef CONFIG_NUMA
6172                zone->node = nid;
6173#endif
6174                zone->name = zone_names[j];
6175                zone->zone_pgdat = pgdat;
6176                spin_lock_init(&zone->lock);
6177                zone_seqlock_init(zone);
6178                zone_pcp_init(zone);
6179
6180                if (!size)
6181                        continue;
6182
6183                set_pageblock_order();
6184                setup_usemap(pgdat, zone, zone_start_pfn, size);
6185                init_currently_empty_zone(zone, zone_start_pfn, size);
6186                memmap_init(size, nid, j, zone_start_pfn);
6187        }
6188}
6189
6190static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6191{
6192        unsigned long __maybe_unused start = 0;
6193        unsigned long __maybe_unused offset = 0;
6194
6195        /* Skip empty nodes */
6196        if (!pgdat->node_spanned_pages)
6197                return;
6198
6199#ifdef CONFIG_FLAT_NODE_MEM_MAP
6200        start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6201        offset = pgdat->node_start_pfn - start;
6202        /* ia64 gets its own node_mem_map, before this, without bootmem */
6203        if (!pgdat->node_mem_map) {
6204                unsigned long size, end;
6205                struct page *map;
6206
6207                /*
6208                 * The zone's endpoints aren't required to be MAX_ORDER
6209                 * aligned, but the node_mem_map endpoints must be MAX_ORDER aligned
6210                 * for the buddy allocator to function correctly.
6211                 */
6212                end = pgdat_end_pfn(pgdat);
6213                end = ALIGN(end, MAX_ORDER_NR_PAGES);
6214                size =  (end - start) * sizeof(struct page);
6215                map = alloc_remap(pgdat->node_id, size);
6216                if (!map)
6217                        map = memblock_virt_alloc_node_nopanic(size,
6218                                                               pgdat->node_id);
6219                pgdat->node_mem_map = map + offset;
6220        }
6221#ifndef CONFIG_NEED_MULTIPLE_NODES
6222        /*
6223         * With no DISCONTIG, the global mem_map is just set as node 0's
6224         */
6225        if (pgdat == NODE_DATA(0)) {
6226                mem_map = NODE_DATA(0)->node_mem_map;
6227#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6228                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6229                        mem_map -= offset;
6230#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6231        }
6232#endif
6233#endif /* CONFIG_FLAT_NODE_MEM_MAP */
6234}
6235
6236void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
6237                unsigned long node_start_pfn, unsigned long *zholes_size)
6238{
6239        pg_data_t *pgdat = NODE_DATA(nid);
6240        unsigned long start_pfn = 0;
6241        unsigned long end_pfn = 0;
6242
6243        /* pg_data_t should be reset to zero when it's allocated */
6244        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6245
6246        pgdat->node_id = nid;
6247        pgdat->node_start_pfn = node_start_pfn;
6248        pgdat->per_cpu_nodestats = NULL;
6249#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6250        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6251        pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6252                (u64)start_pfn << PAGE_SHIFT,
6253                end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6254#else
6255        start_pfn = node_start_pfn;
6256#endif
6257        calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6258                                  zones_size, zholes_size);
6259
6260        alloc_node_mem_map(pgdat);
6261#ifdef CONFIG_FLAT_NODE_MEM_MAP
6262        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
6263                nid, (unsigned long)pgdat,
6264                (unsigned long)pgdat->node_mem_map);
6265#endif
6266
6267        reset_deferred_meminit(pgdat);
6268        free_area_init_core(pgdat);
6269}
6270
6271#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6272
6273#if MAX_NUMNODES > 1
6274/*
6275 * Figure out the number of possible node ids.
6276 */
6277void __init setup_nr_node_ids(void)
6278{
6279        unsigned int highest;
6280
6281        highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6282        nr_node_ids = highest + 1;
6283}
6284#endif
6285
6286/**
6287 * node_map_pfn_alignment - determine the maximum internode alignment
6288 *
6289 * This function should be called after node map is populated and sorted.
6290 * It calculates the maximum power of two alignment which can distinguish
6291 * all the nodes.
6292 *
6293 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6294 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
6295 * nodes are shifted by 256MiB, 256MiB is returned.  Note that if only the
6296 * last node is shifted, 1GiB is enough and this function will indicate so.
6297 *
6298 * This is used to test whether pfn -> nid mapping of the chosen memory
6299 * model has fine enough granularity to avoid incorrect mapping for the
6300 * populated node map.
6301 *
6302 * Returns the determined alignment in PFNs.  0 if there is no alignment
6303 * requirement (single node).
6304 */
6305unsigned long __init node_map_pfn_alignment(void)
6306{
6307        unsigned long accl_mask = 0, last_end = 0;
6308        unsigned long start, end, mask;
6309        int last_nid = -1;
6310        int i, nid;
6311
6312        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
6313                if (!start || last_nid < 0 || last_nid == nid) {
6314                        last_nid = nid;
6315                        last_end = end;
6316                        continue;
6317                }
6318
6319                /*
6320                 * Start with a mask granular enough to pin-point to the
6321                 * start pfn and tick off bits one-by-one until it becomes
6322                 * too coarse to separate the current node from the last.
6323                 */
6324                mask = ~((1 << __ffs(start)) - 1);
6325                while (mask && last_end <= (start & (mask << 1)))
6326                        mask <<= 1;
6327
6328                /* accumulate all internode masks */
6329                accl_mask |= mask;
6330        }
6331
6332        /* convert mask to number of pages */
6333        return ~accl_mask + 1;
6334}
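
/*
 * Worked example of the mask walk above (hypothetical layout, assuming
 * PAGE_SHIFT == 12): two back-to-back 1GiB nodes, node 0 starting at PFN 0
 * and node 1 at PFN 0x40000. When the node 1 range is visited,
 * start == 0x40000, so the initial mask is ~0x3ffff. A coarser mask could
 * no longer separate node 0 from node 1 (last_end is not below
 * start & (mask << 1)), so the mask is kept and ~accl_mask + 1 == 0x40000
 * PFNs, i.e. the 1GiB alignment from the example in the comment above.
 */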
6335
6336/* Find the lowest pfn for a node */
6337static unsigned long __init find_min_pfn_for_node(int nid)
6338{
6339        unsigned long min_pfn = ULONG_MAX;
6340        unsigned long start_pfn;
6341        int i;
6342
6343        for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6344                min_pfn = min(min_pfn, start_pfn);
6345
6346        if (min_pfn == ULONG_MAX) {
6347                pr_warn("Could not find start_pfn for node %d\n", nid);
6348                return 0;
6349        }
6350
6351        return min_pfn;
6352}
6353
6354/**
6355 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6356 *
6357 * It returns the minimum PFN based on information provided via
6358 * memblock_set_node().
6359 */
6360unsigned long __init find_min_pfn_with_active_regions(void)
6361{
6362        return find_min_pfn_for_node(MAX_NUMNODES);
6363}
6364
6365/*
6366 * early_calculate_totalpages()
6367 * Sum pages in active regions for movable zone.
6368 * Populate N_MEMORY for calculating usable_nodes.
6369 */
6370static unsigned long __init early_calculate_totalpages(void)
6371{
6372        unsigned long totalpages = 0;
6373        unsigned long start_pfn, end_pfn;
6374        int i, nid;
6375
6376        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6377                unsigned long pages = end_pfn - start_pfn;
6378
6379                totalpages += pages;
6380                if (pages)
6381                        node_set_state(nid, N_MEMORY);
6382        }
6383        return totalpages;
6384}
6385
6386/*
6387 * Find the PFN the Movable zone begins in each node. Kernel memory
6388 * is spread evenly between nodes as long as the nodes have enough
6389 * memory. When they don't, some nodes will have more kernelcore than
6390 * others
6391 */
6392static void __init find_zone_movable_pfns_for_nodes(void)
6393{
6394        int i, nid;
6395        unsigned long usable_startpfn;
6396        unsigned long kernelcore_node, kernelcore_remaining;
6397        /* save the state before borrowing the nodemask */
6398        nodemask_t saved_node_state = node_states[N_MEMORY];
6399        unsigned long totalpages = early_calculate_totalpages();
6400        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6401        struct memblock_region *r;
6402
6403        /* Need to find movable_zone earlier when movable_node is specified. */
6404        find_usable_zone_for_movable();
6405
6406        /*
6407         * If movable_node is specified, ignore kernelcore and movablecore
6408         * options.
6409         */
6410        if (movable_node_is_enabled()) {
6411                for_each_memblock(memory, r) {
6412                        if (!memblock_is_hotpluggable(r))
6413                                continue;
6414
6415                        nid = r->nid;
6416
6417                        usable_startpfn = PFN_DOWN(r->base);
6418                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6419                                min(usable_startpfn, zone_movable_pfn[nid]) :
6420                                usable_startpfn;
6421                }
6422
6423                goto out2;
6424        }
6425
6426        /*
6427         * If kernelcore=mirror is specified, ignore movablecore option
6428         */
6429        if (mirrored_kernelcore) {
6430                bool mem_below_4gb_not_mirrored = false;
6431
6432                for_each_memblock(memory, r) {
6433                        if (memblock_is_mirror(r))
6434                                continue;
6435
6436                        nid = r->nid;
6437
6438                        usable_startpfn = memblock_region_memory_base_pfn(r);
6439
6440                        if (usable_startpfn < 0x100000) {
6441                                mem_below_4gb_not_mirrored = true;
6442                                continue;
6443                        }
6444
6445                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6446                                min(usable_startpfn, zone_movable_pfn[nid]) :
6447                                usable_startpfn;
6448                }
6449
6450                if (mem_below_4gb_not_mirrored)
6451                        pr_warn("This configuration results in unmirrored kernel memory.\n");
6452
6453                goto out2;
6454        }
6455
6456        /*
6457         * If movablecore=nn[KMG] was specified, calculate the corresponding
6458         * size of kernelcore so that memory usable for any allocation type
6459         * is evenly spread.
6460         * and movablecore are specified, then the value of kernelcore
6461         * will be used for required_kernelcore if it's greater than
6462         * what movablecore would have allowed.
6463         */
6464        if (required_movablecore) {
6465                unsigned long corepages;
6466
6467                /*
6468                 * Round-up so that ZONE_MOVABLE is at least as large as what
6469                 * was requested by the user
6470                 */
6471                required_movablecore =
6472                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6473                required_movablecore = min(totalpages, required_movablecore);
6474                corepages = totalpages - required_movablecore;
6475
6476                required_kernelcore = max(required_kernelcore, corepages);
6477        }
6478
6479        /*
6480         * If kernelcore was not specified or kernelcore size is larger
6481         * than totalpages, there is no ZONE_MOVABLE.
6482         */
6483        if (!required_kernelcore || required_kernelcore >= totalpages)
6484                goto out;
6485
6486        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
6487        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6488
6489restart:
6490        /* Spread kernelcore memory as evenly as possible throughout nodes */
6491        kernelcore_node = required_kernelcore / usable_nodes;
6492        for_each_node_state(nid, N_MEMORY) {
6493                unsigned long start_pfn, end_pfn;
6494
6495                /*
6496                 * Recalculate kernelcore_node if the division per node
6497                 * now exceeds what is necessary to satisfy the requested
6498                 * amount of memory for the kernel
6499                 */
6500                if (required_kernelcore < kernelcore_node)
6501                        kernelcore_node = required_kernelcore / usable_nodes;
6502
6503                /*
6504                 * As the map is walked, we track how much memory is usable
6505                 * by the kernel using kernelcore_remaining. When it is
6506                 * 0, the rest of the node is usable by ZONE_MOVABLE
6507                 */
6508                kernelcore_remaining = kernelcore_node;
6509
6510                /* Go through each range of PFNs within this node */
6511                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6512                        unsigned long size_pages;
6513
6514                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6515                        if (start_pfn >= end_pfn)
6516                                continue;
6517
6518                        /* Account for what is only usable for kernelcore */
6519                        if (start_pfn < usable_startpfn) {
6520                                unsigned long kernel_pages;
6521                                kernel_pages = min(end_pfn, usable_startpfn)
6522                                                                - start_pfn;
6523
6524                                kernelcore_remaining -= min(kernel_pages,
6525                                                        kernelcore_remaining);
6526                                required_kernelcore -= min(kernel_pages,
6527                                                        required_kernelcore);
6528
6529                                /* Continue if range is now fully accounted */
6530                                if (end_pfn <= usable_startpfn) {
6531
6532                                        /*
6533                                         * Push zone_movable_pfn to the end so
6534                                         * that if we have to rebalance
6535                                         * kernelcore across nodes, we will
6536                                         * not double account here
6537                                         */
6538                                        zone_movable_pfn[nid] = end_pfn;
6539                                        continue;
6540                                }
6541                                start_pfn = usable_startpfn;
6542                        }
6543
6544                        /*
6545                         * The usable PFN range for ZONE_MOVABLE is from
6546                         * start_pfn->end_pfn. Calculate size_pages as the
6547                         * number of pages used as kernelcore
6548                         */
6549                        size_pages = end_pfn - start_pfn;
6550                        if (size_pages > kernelcore_remaining)
6551                                size_pages = kernelcore_remaining;
6552                        zone_movable_pfn[nid] = start_pfn + size_pages;
6553
6554                        /*
6555                         * Some kernelcore has been met, update counts and
6556                         * break if the kernelcore for this node has been
6557                         * satisfied
6558                         */
6559                        required_kernelcore -= min(required_kernelcore,
6560                                                                size_pages);
6561                        kernelcore_remaining -= size_pages;
6562                        if (!kernelcore_remaining)
6563                                break;
6564                }
6565        }
6566
6567        /*
6568         * If there is still required_kernelcore, we do another pass with one
6569         * less node in the count. This will push zone_movable_pfn[nid] further
6570         * along on the nodes that still have memory until kernelcore is
6571         * satisfied
6572         */
6573        usable_nodes--;
6574        if (usable_nodes && required_kernelcore > usable_nodes)
6575                goto restart;
6576
6577out2:
6578        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
6579        for (nid = 0; nid < MAX_NUMNODES; nid++)
6580                zone_movable_pfn[nid] =
6581                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
6582
6583out:
6584        /* restore the node_state */
6585        node_states[N_MEMORY] = saved_node_state;
6586}
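
/*
 * Worked example of the spreading above (all numbers hypothetical): two
 * nodes of 1,048,576 pages each (4GiB with 4KiB pages) and a kernelcore=
 * request equivalent to 1,048,576 pages. kernelcore_node becomes
 * 1048576 / 2 = 524288 pages per node, so zone_movable_pfn[] for each node
 * lands 524288 pages past the node's first PFN (before the final roundup
 * to MAX_ORDER_NR_PAGES) and the upper half of each node ends up in
 * ZONE_MOVABLE.
 */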
6587
6588/* Any regular or high memory on that node ? */
6589static void check_for_memory(pg_data_t *pgdat, int nid)
6590{
6591        enum zone_type zone_type;
6592
6593        if (N_MEMORY == N_NORMAL_MEMORY)
6594                return;
6595
6596        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
6597                struct zone *zone = &pgdat->node_zones[zone_type];
6598                if (populated_zone(zone)) {
6599                        node_set_state(nid, N_HIGH_MEMORY);
6600                        if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
6601                            zone_type <= ZONE_NORMAL)
6602                                node_set_state(nid, N_NORMAL_MEMORY);
6603                        break;
6604                }
6605        }
6606}
6607
6608/**
6609 * free_area_init_nodes - Initialise all pg_data_t and zone data
6610 * @max_zone_pfn: an array of max PFNs for each zone
6611 *
6612 * This will call free_area_init_node() for each active node in the system.
6613 * Using the page ranges provided by memblock_set_node(), the size of each
6614 * zone in each node and its holes is calculated. If the maximum PFNs of
6615 * two adjacent zones match, it is assumed that the higher zone is empty.
6616 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
6617 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
6618 * starts where the previous one ended. For example, ZONE_DMA32 starts
6619 * at arch_max_dma_pfn.
6620 */
6621void __init free_area_init_nodes(unsigned long *max_zone_pfn)
6622{
6623        unsigned long start_pfn, end_pfn;
6624        int i, nid;
6625
6626        /* Record where the zone boundaries are */
6627        memset(arch_zone_lowest_possible_pfn, 0,
6628                                sizeof(arch_zone_lowest_possible_pfn));
6629        memset(arch_zone_highest_possible_pfn, 0,
6630                                sizeof(arch_zone_highest_possible_pfn));
6631
6632        start_pfn = find_min_pfn_with_active_regions();
6633
6634        for (i = 0; i < MAX_NR_ZONES; i++) {
6635                if (i == ZONE_MOVABLE)
6636                        continue;
6637
6638                end_pfn = max(max_zone_pfn[i], start_pfn);
6639                arch_zone_lowest_possible_pfn[i] = start_pfn;
6640                arch_zone_highest_possible_pfn[i] = end_pfn;
6641
6642                start_pfn = end_pfn;
6643        }
6644
6645        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
6646        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
6647        find_zone_movable_pfns_for_nodes();
6648
6649        /* Print out the zone ranges */
6650        pr_info("Zone ranges:\n");
6651        for (i = 0; i < MAX_NR_ZONES; i++) {
6652                if (i == ZONE_MOVABLE)
6653                        continue;
6654                pr_info("  %-8s ", zone_names[i]);
6655                if (arch_zone_lowest_possible_pfn[i] ==
6656                                arch_zone_highest_possible_pfn[i])
6657                        pr_cont("empty\n");
6658                else
6659                        pr_cont("[mem %#018Lx-%#018Lx]\n",
6660                                (u64)arch_zone_lowest_possible_pfn[i]
6661                                        << PAGE_SHIFT,
6662                                ((u64)arch_zone_highest_possible_pfn[i]
6663                                        << PAGE_SHIFT) - 1);
6664        }
6665
6666        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
6667        pr_info("Movable zone start for each node\n");
6668        for (i = 0; i < MAX_NUMNODES; i++) {
6669                if (zone_movable_pfn[i])
6670                        pr_info("  Node %d: %#018Lx\n", i,
6671                               (u64)zone_movable_pfn[i] << PAGE_SHIFT);
6672        }
6673
6674        /* Print out the early node map */
6675        pr_info("Early memory node ranges\n");
6676        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
6677                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
6678                        (u64)start_pfn << PAGE_SHIFT,
6679                        ((u64)end_pfn << PAGE_SHIFT) - 1);
6680
6681        /* Initialise every node */
6682        mminit_verify_pageflags_layout();
6683        setup_nr_node_ids();
6684        for_each_online_node(nid) {
6685                pg_data_t *pgdat = NODE_DATA(nid);
6686                free_area_init_node(nid, NULL,
6687                                find_min_pfn_for_node(nid), NULL);
6688
6689                /* Any memory on that node */
6690                if (pgdat->node_present_pages)
6691                        node_set_state(nid, N_MEMORY);
6692                check_for_memory(pgdat, nid);
6693        }
6694}
6695
6696static int __init cmdline_parse_core(char *p, unsigned long *core)
6697{
6698        unsigned long long coremem;
6699        if (!p)
6700                return -EINVAL;
6701
6702        coremem = memparse(p, &p);
6703        *core = coremem >> PAGE_SHIFT;
6704
6705        /* Paranoid check that UL is enough for the coremem value */
6706        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
6707
6708        return 0;
6709}
6710
6711/*
6712 * kernelcore=size sets the amount of memory to use for allocations that
6713 * cannot be reclaimed or migrated.
6714 */
6715static int __init cmdline_parse_kernelcore(char *p)
6716{
6717        /* parse kernelcore=mirror */
6718        if (parse_option_str(p, "mirror")) {
6719                mirrored_kernelcore = true;
6720                return 0;
6721        }
6722
6723        return cmdline_parse_core(p, &required_kernelcore);
6724}
6725
6726/*
6727 * movablecore=size sets the amount of memory to use for allocations that
6728 * can be reclaimed or migrated.
6729 */
6730static int __init cmdline_parse_movablecore(char *p)
6731{
6732        return cmdline_parse_core(p, &required_movablecore);
6733}
6734
6735early_param("kernelcore", cmdline_parse_kernelcore);
6736early_param("movablecore", cmdline_parse_movablecore);
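
/*
 * Worked example of the option parsing above (hypothetical command line,
 * assuming a 4KiB PAGE_SIZE): "kernelcore=512M" is parsed by memparse()
 * into 536870912 bytes, which cmdline_parse_core() converts to
 * 536870912 >> 12 == 131072 pages of required_kernelcore.
 */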
6737
6738#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6739
6740void adjust_managed_page_count(struct page *page, long count)
6741{
6742        spin_lock(&managed_page_count_lock);
6743        page_zone(page)->managed_pages += count;
6744        totalram_pages += count;
6745#ifdef CONFIG_HIGHMEM
6746        if (PageHighMem(page))
6747                totalhigh_pages += count;
6748#endif
6749        spin_unlock(&managed_page_count_lock);
6750}
6751EXPORT_SYMBOL(adjust_managed_page_count);
6752
6753unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
6754{
6755        void *pos;
6756        unsigned long pages = 0;
6757
6758        start = (void *)PAGE_ALIGN((unsigned long)start);
6759        end = (void *)((unsigned long)end & PAGE_MASK);
6760        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
6761                if ((unsigned int)poison <= 0xFF)
6762                        memset(pos, poison, PAGE_SIZE);
6763                free_reserved_page(virt_to_page(pos));
6764        }
6765
6766        if (pages && s)
6767                pr_info("Freeing %s memory: %ldK\n",
6768                        s, pages << (PAGE_SHIFT - 10));
6769
6770        return pages;
6771}
6772EXPORT_SYMBOL(free_reserved_area);
6773
6774#ifdef  CONFIG_HIGHMEM
6775void free_highmem_page(struct page *page)
6776{
6777        __free_reserved_page(page);
6778        totalram_pages++;
6779        page_zone(page)->managed_pages++;
6780        totalhigh_pages++;
6781}
6782#endif
6783
6784
6785void __init mem_init_print_info(const char *str)
6786{
6787        unsigned long physpages, codesize, datasize, rosize, bss_size;
6788        unsigned long init_code_size, init_data_size;
6789
6790        physpages = get_num_physpages();
6791        codesize = _etext - _stext;
6792        datasize = _edata - _sdata;
6793        rosize = __end_rodata - __start_rodata;
6794        bss_size = __bss_stop - __bss_start;
6795        init_data_size = __init_end - __init_begin;
6796        init_code_size = _einittext - _sinittext;
6797
6798        /*
6799         * Detect special cases and adjust section sizes accordingly:
6800         * 1) .init.* may be embedded into .data sections
6801         * 2) .init.text.* may be out of [__init_begin, __init_end],
6802         *    please refer to arch/tile/kernel/vmlinux.lds.S.
6803         * 3) .rodata.* may be embedded into .text or .data sections.
6804         */
6805#define adj_init_size(start, end, size, pos, adj) \
6806        do { \
6807                if (start <= pos && pos < end && size > adj) \
6808                        size -= adj; \
6809        } while (0)
6810
6811        adj_init_size(__init_begin, __init_end, init_data_size,
6812                     _sinittext, init_code_size);
6813        adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
6814        adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
6815        adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
6816        adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
6817
6818#undef  adj_init_size
6819
6820        pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
6821#ifdef  CONFIG_HIGHMEM
6822                ", %luK highmem"
6823#endif
6824                "%s%s)\n",
6825                nr_free_pages() << (PAGE_SHIFT - 10),
6826                physpages << (PAGE_SHIFT - 10),
6827                codesize >> 10, datasize >> 10, rosize >> 10,
6828                (init_data_size + init_code_size) >> 10, bss_size >> 10,
6829                (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
6830                totalcma_pages << (PAGE_SHIFT - 10),
6831#ifdef  CONFIG_HIGHMEM
6832                totalhigh_pages << (PAGE_SHIFT - 10),
6833#endif
6834                str ? ", " : "", str ? str : "");
6835}
6836
6837/**
6838 * set_dma_reserve - set the specified number of pages reserved in the first zone
6839 * @new_dma_reserve: The number of pages to mark reserved
6840 *
6841 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
6842 * In the DMA zone, a significant percentage may be consumed by kernel image
6843 * and other unfreeable allocations which can skew the watermarks badly. This
6844 * function may optionally be used to account for unfreeable pages in the
6845 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
6846 * smaller per-cpu batchsize.
6847 */
6848void __init set_dma_reserve(unsigned long new_dma_reserve)
6849{
6850        dma_reserve = new_dma_reserve;
6851}
6852
6853void __init free_area_init(unsigned long *zones_size)
6854{
6855        free_area_init_node(0, zones_size,
6856                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
6857}
6858
6859static int page_alloc_cpu_dead(unsigned int cpu)
6860{
6861
6862        lru_add_drain_cpu(cpu);
6863        drain_pages(cpu);
6864
6865        /*
6866         * Spill the event counters of the dead processor
6867         * into the current processor's event counters.
6868         * This artificially elevates the count of the current
6869         * processor.
6870         */
6871        vm_events_fold_cpu(cpu);
6872
6873        /*
6874         * Zero the differential counters of the dead processor
6875         * so that the vm statistics are consistent.
6876         *
6877         * This is only okay since the processor is dead and cannot
6878         * race with what we are doing.
6879         */
6880        cpu_vm_stats_fold(cpu);
6881        return 0;
6882}
6883
6884void __init page_alloc_init(void)
6885{
6886        int ret;
6887
6888        ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
6889                                        "mm/page_alloc:dead", NULL,
6890                                        page_alloc_cpu_dead);
6891        WARN_ON(ret < 0);
6892}
6893
6894/*
6895 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
6896 *      or min_free_kbytes changes.
6897 */
6898static void calculate_totalreserve_pages(void)
6899{
6900        struct pglist_data *pgdat;
6901        unsigned long reserve_pages = 0;
6902        enum zone_type i, j;
6903
6904        for_each_online_pgdat(pgdat) {
6905
6906                pgdat->totalreserve_pages = 0;
6907
6908                for (i = 0; i < MAX_NR_ZONES; i++) {
6909                        struct zone *zone = pgdat->node_zones + i;
6910                        long max = 0;
6911
6912                        /* Find valid and maximum lowmem_reserve in the zone */
6913                        for (j = i; j < MAX_NR_ZONES; j++) {
6914                                if (zone->lowmem_reserve[j] > max)
6915                                        max = zone->lowmem_reserve[j];
6916                        }
6917
6918                        /* we treat the high watermark as reserved pages. */
6919                        max += high_wmark_pages(zone);
6920
6921                        if (max > zone->managed_pages)
6922                                max = zone->managed_pages;
6923
6924                        pgdat->totalreserve_pages += max;
6925
6926                        reserve_pages += max;
6927                }
6928        }
6929        totalreserve_pages = reserve_pages;
6930}
6931
6932/*
6933 * setup_per_zone_lowmem_reserve - called whenever
6934 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
6935 *      has a correct pages reserved value, so an adequate number of
6936 *      pages are left in the zone after a successful __alloc_pages().
6937 */
6938static void setup_per_zone_lowmem_reserve(void)
6939{
6940        struct pglist_data *pgdat;
6941        enum zone_type j, idx;
6942
6943        for_each_online_pgdat(pgdat) {
6944                for (j = 0; j < MAX_NR_ZONES; j++) {
6945                        struct zone *zone = pgdat->node_zones + j;
6946                        unsigned long managed_pages = zone->managed_pages;
6947
6948                        zone->lowmem_reserve[j] = 0;
6949
6950                        idx = j;
6951                        while (idx) {
6952                                struct zone *lower_zone;
6953
6954                                idx--;
6955
6956                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
6957                                        sysctl_lowmem_reserve_ratio[idx] = 1;
6958
6959                                lower_zone = pgdat->node_zones + idx;
6960                                lower_zone->lowmem_reserve[j] = managed_pages /
6961                                        sysctl_lowmem_reserve_ratio[idx];
6962                                managed_pages += lower_zone->managed_pages;
6963                        }
6964                }
6965        }
6966
6967        /* update totalreserve_pages */
6968        calculate_totalreserve_pages();
6969}
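
/*
 * Illustrative sketch of the protection computed above, using hypothetical
 * zone sizes and a ratio of 256 (the typical default for the DMA zones,
 * assumed here); not part of the allocator itself.
 */
static inline unsigned long lowmem_reserve_example_sketch(void)
{
	unsigned long normal_managed = 786432;	/* 3GiB of 4KiB pages, assumed */
	unsigned long dma32_ratio = 256;	/* assumed ratio for DMA32 */

	/*
	 * DMA32 then reserves normal_managed / dma32_ratio pages against
	 * allocations that could have been satisfied from ZONE_NORMAL:
	 * 786432 / 256 == 3072 pages, i.e. 12MiB.
	 */
	return normal_managed / dma32_ratio;
}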
6970
6971static void __setup_per_zone_wmarks(void)
6972{
6973        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
6974        unsigned long lowmem_pages = 0;
6975        struct zone *zone;
6976        unsigned long flags;
6977
6978        /* Calculate total number of !ZONE_HIGHMEM pages */
6979        for_each_zone(zone) {
6980                if (!is_highmem(zone))
6981                        lowmem_pages += zone->managed_pages;
6982        }
6983
6984        for_each_zone(zone) {
6985                u64 tmp;
6986
6987                spin_lock_irqsave(&zone->lock, flags);
6988                tmp = (u64)pages_min * zone->managed_pages;
6989                do_div(tmp, lowmem_pages);
6990                if (is_highmem(zone)) {
6991                        /*
6992                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
6993                         * need highmem pages, so cap pages_min to a small
6994                         * value here.
6995                         *
6996                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
6997                         * deltas control async page reclaim, and so should
6998                         * not be capped for highmem.
6999                         */
7000                        unsigned long min_pages;
7001
7002                        min_pages = zone->managed_pages / 1024;
7003                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7004                        zone->watermark[WMARK_MIN] = min_pages;
7005                } else {
7006                        /*
7007                         * If it's a lowmem zone, reserve a number of pages
7008                         * proportionate to the zone's size.
7009                         */
7010                        zone->watermark[WMARK_MIN] = tmp;
7011                }
7012
7013                /*
7014                 * Set the kswapd watermarks distance according to the
7015                 * scale factor in proportion to available memory, but
7016                 * ensure a minimum size on small systems.
7017                 */
7018                tmp = max_t(u64, tmp >> 2,
7019                            mult_frac(zone->managed_pages,
7020                                      watermark_scale_factor, 10000));
7021
7022                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7023                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7024
7025                spin_unlock_irqrestore(&zone->lock, flags);
7026        }
7027
7028        /* update totalreserve_pages */
7029        calculate_totalreserve_pages();
7030}
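
/*
 * Illustrative walk-through of the lowmem branch above with hypothetical
 * inputs: min_free_kbytes == 4096, a single lowmem zone of 262144 managed
 * pages (1GiB with 4KiB pages) and the default watermark_scale_factor of
 * 10. None of these values come from this file; they are assumptions for
 * the example.
 */
static inline void wmark_example_sketch(void)
{
	unsigned long managed = 262144, lowmem = 262144;
	unsigned long pages_min = 4096 >> (PAGE_SHIFT - 10);	/* 1024 */
	u64 tmp = (u64)pages_min * managed;
	unsigned long wmark_min, dist;

	do_div(tmp, lowmem);				/* 1024 */
	wmark_min = tmp;				/* WMARK_MIN = 1024 */
	dist = max_t(u64, tmp >> 2,
		     mult_frac(managed, 10, 10000));	/* max(256, 262) = 262 */

	/* WMARK_LOW = 1024 + 262 = 1286, WMARK_HIGH = 1024 + 2 * 262 = 1548 */
	(void)wmark_min;
	(void)dist;
}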
7031
7032/**
7033 * setup_per_zone_wmarks - called when min_free_kbytes changes
7034 * or when memory is hot-{added|removed}
7035 *
7036 * Ensures that the watermark[min,low,high] values for each zone are set
7037 * correctly with respect to min_free_kbytes.
7038 */
7039void setup_per_zone_wmarks(void)
7040{
7041        mutex_lock(&zonelists_mutex);
7042        __setup_per_zone_wmarks();
7043        mutex_unlock(&zonelists_mutex);
7044}
7045
7046/*
7047 * Initialise min_free_kbytes.
7048 *
7049 * For small machines we want it small (128k min).  For large machines
7050 * we want it large (64MB max).  But it is not linear, because network
7051 * bandwidth does not increase linearly with machine size.  We use
7052 *
7053 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7054 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
7055 *
7056 * which yields
7057 *
7058 * 16MB:        512k
7059 * 32MB:        724k
7060 * 64MB:        1024k
7061 * 128MB:       1448k
7062 * 256MB:       2048k
7063 * 512MB:       2896k
7064 * 1024MB:      4096k
7065 * 2048MB:      5792k
7066 * 4096MB:      8192k
7067 * 8192MB:      11584k
7068 * 16384MB:     16384k
7069 */
7070int __meminit init_per_zone_wmark_min(void)
7071{
7072        unsigned long lowmem_kbytes;
7073        int new_min_free_kbytes;
7074
7075        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7076        new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7077
7078        if (new_min_free_kbytes > user_min_free_kbytes) {
7079                min_free_kbytes = new_min_free_kbytes;
7080                if (min_free_kbytes < 128)
7081                        min_free_kbytes = 128;
7082                if (min_free_kbytes > 65536)
7083                        min_free_kbytes = 65536;
7084        } else {
7085                pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7086                                new_min_free_kbytes, user_min_free_kbytes);
7087        }
7088        setup_per_zone_wmarks();
7089        refresh_zone_stat_thresholds();
7090        setup_per_zone_lowmem_reserve();
7091
7092#ifdef CONFIG_NUMA
7093        setup_min_unmapped_ratio();
7094        setup_min_slab_ratio();
7095#endif
7096
7097        return 0;
7098}
7099core_initcall(init_per_zone_wmark_min)
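
/*
 * Illustrative check of the table above (not part of the init path):
 * int_sqrt(lowmem_kbytes * 16) reproduces the listed values, e.g. for 16MB
 * and 1GB of lowmem.
 */
static inline void min_free_kbytes_example_sketch(void)
{
	unsigned long small = int_sqrt(16384UL * 16);	/* 16MB -> 512k  */
	unsigned long large = int_sqrt(1048576UL * 16);	/* 1GB  -> 4096k */

	(void)small;
	(void)large;
}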
7100
7101/*
7102 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() so
7103 *      that we can call two helper functions whenever min_free_kbytes
7104 *      changes.
7105 */
7106int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7107        void __user *buffer, size_t *length, loff_t *ppos)
7108{
7109        int rc;
7110
7111        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7112        if (rc)
7113                return rc;
7114
7115        if (write) {
7116                user_min_free_kbytes = min_free_kbytes;
7117                setup_per_zone_wmarks();
7118        }
7119        return 0;
7120}
7121
7122int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7123        void __user *buffer, size_t *length, loff_t *ppos)
7124{
7125        int rc;
7126
7127        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7128        if (rc)
7129                return rc;
7130
7131        if (write)
7132                setup_per_zone_wmarks();
7133
7134        return 0;
7135}
7136
7137#ifdef CONFIG_NUMA
7138static void setup_min_unmapped_ratio(void)
7139{
7140        pg_data_t *pgdat;
7141        struct zone *zone;
7142
7143        for_each_online_pgdat(pgdat)
7144                pgdat->min_unmapped_pages = 0;
7145
7146        for_each_zone(zone)
7147                zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
7148                                sysctl_min_unmapped_ratio) / 100;
7149}
7150
7151
7152int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7153        void __user *buffer, size_t *length, loff_t *ppos)
7154{
7155        int rc;
7156
7157        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7158        if (rc)
7159                return rc;
7160
7161        setup_min_unmapped_ratio();
7162
7163        return 0;
7164}
7165
7166static void setup_min_slab_ratio(void)
7167{
7168        pg_data_t *pgdat;
7169        struct zone *zone;
7170
7171        for_each_online_pgdat(pgdat)
7172                pgdat->min_slab_pages = 0;
7173
7174        for_each_zone(zone)
7175                zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
7176                                sysctl_min_slab_ratio) / 100;
7177}
7178
7179int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7180        void __user *buffer, size_t *length, loff_t *ppos)
7181{
7182        int rc;
7183
7184        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7185        if (rc)
7186                return rc;
7187
7188        setup_min_slab_ratio();
7189
7190        return 0;
7191}
7192#endif
7193
7194/*
7195 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7196 *      proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
7197 *      whenever sysctl_lowmem_reserve_ratio changes.
7198 *
7199 * The reserve ratio obviously has absolutely no relation with the
7200 * minimum watermarks. The lowmem reserve ratio can only make sense
7201 * if in function of the boot time zone sizes.
7202 */
7203int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7204        void __user *buffer, size_t *length, loff_t *ppos)
7205{
7206        proc_dointvec_minmax(table, write, buffer, length, ppos);
7207        setup_per_zone_lowmem_reserve();
7208        return 0;
7209}
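/*
 * Illustrative usage (not part of the original source): the ratio is an
 * array with one entry per zone type, so e.g.
 *
 *	echo "256 256 32" > /proc/sys/vm/lowmem_reserve_ratio
 *
 * causes the handler above to run and the per-zone lowmem reserves to be
 * recomputed from the boot-time zone sizes.
 */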
7210
7211/*
7212 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
7213 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
7214 * pagelist can hold before it gets flushed back to the buddy allocator.
7215 */
7216int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
7217        void __user *buffer, size_t *length, loff_t *ppos)
7218{
7219        struct zone *zone;
7220        int old_percpu_pagelist_fraction;
7221        int ret;
7222
7223        mutex_lock(&pcp_batch_high_lock);
7224        old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7225
7226        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7227        if (!write || ret < 0)
7228                goto out;
7229
7230        /* Sanity checking to avoid pcp imbalance */
7231        if (percpu_pagelist_fraction &&
7232            percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7233                percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7234                ret = -EINVAL;
7235                goto out;
7236        }
7237
7238        /* No change? */
7239        if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7240                goto out;
7241
7242        for_each_populated_zone(zone) {
7243                unsigned int cpu;
7244
7245                for_each_possible_cpu(cpu)
7246                        pageset_set_high_and_batch(zone,
7247                                        per_cpu_ptr(zone->pageset, cpu));
7248        }
7249out:
7250        mutex_unlock(&pcp_batch_high_lock);
7251        return ret;
7252}
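/*
 * Worked example (illustrative numbers, not from the source): with
 * percpu_pagelist_fraction = 8, pageset_set_high_and_batch() sets each
 * CPU's pcp->high to roughly zone->managed_pages / 8 (with pcp->batch
 * derived from it), so a zone managing 1048576 pages allows about
 * 131072 pages on each hot per-cpu list before pages are flushed back
 * to the buddy allocator.
 */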
7253
7254#ifdef CONFIG_NUMA
7255int hashdist = HASHDIST_DEFAULT;
7256
7257static int __init set_hashdist(char *str)
7258{
7259        if (!str)
7260                return 0;
7261        hashdist = simple_strtoul(str, &str, 0);
7262        return 1;
7263}
7264__setup("hashdist=", set_hashdist);
7265#endif
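/*
 * Illustrative usage (not part of the original source): booting with
 * "hashdist=0" makes the non-early tables in alloc_large_system_hash()
 * below come from alloc_pages_exact() instead of being spread across NUMA
 * nodes via vmalloc(); "hashdist=1" (typically the default on 64-bit NUMA
 * systems) does the opposite.  HASH_EARLY tables always come from memblock
 * regardless of this setting.
 */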
7266
7267#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7268/*
7269 * Returns the number of pages that the architecture has reserved but
7270 * that are not known to alloc_large_system_hash().
7271 */
7272static unsigned long __init arch_reserved_kernel_pages(void)
7273{
7274        return 0;
7275}
7276#endif
7277
7278/*
7279 * The adaptive scale is meant to reduce the size of hash tables on large-memory
7280 * machines. As the memory size increases the scale increases too, but at a
7281 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
7282 * quadruples the scale is increased by one, which means the hash table
7283 * only doubles in size instead of quadrupling as well.
7284 * 32-bit systems cannot have the large amounts of physical memory where this
7285 * scaling makes sense, so it is disabled on such platforms.
7286 */
7287#if __BITS_PER_LONG > 32
7288#define ADAPT_SCALE_BASE        (64ul << 30)
7289#define ADAPT_SCALE_SHIFT       2
7290#define ADAPT_SCALE_NPAGES      (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7291#endif
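/*
 * Worked example (illustrative, not from the source): with 64 GiB of memory
 * the loop in alloc_large_system_hash() below adds nothing to @scale; at
 * 256 GiB it adds 1, at 1 TiB it adds 2, and so on.  Each increment halves
 * the number of buckets per byte of memory, so quadrupling memory only
 * doubles the resulting hash table instead of quadrupling it.
 */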
7292
7293/*
7294 * allocate a large system hash table from bootmem
7295 * - it is assumed that the hash table must contain an exact power-of-2
7296 *   quantity of entries
7297 * - limit is the number of hash buckets, not the total allocation size
7298 */
7299void *__init alloc_large_system_hash(const char *tablename,
7300                                     unsigned long bucketsize,
7301                                     unsigned long numentries,
7302                                     int scale,
7303                                     int flags,
7304                                     unsigned int *_hash_shift,
7305                                     unsigned int *_hash_mask,
7306                                     unsigned long low_limit,
7307                                     unsigned long high_limit)
7308{
7309        unsigned long long max = high_limit;
7310        unsigned long log2qty, size;
7311        void *table = NULL;
7312        gfp_t gfp_flags;
7313
7314        /* allow the kernel cmdline to have a say */
7315        if (!numentries) {
7316                /* round applicable memory size up to nearest megabyte */
7317                numentries = nr_kernel_pages;
7318                numentries -= arch_reserved_kernel_pages();
7319
7320                /* It isn't necessary when PAGE_SIZE >= 1MB */
7321                if (PAGE_SHIFT < 20)
7322                        numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7323
7324#if __BITS_PER_LONG > 32
7325                if (!high_limit) {
7326                        unsigned long adapt;
7327
7328                        for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7329                             adapt <<= ADAPT_SCALE_SHIFT)
7330                                scale++;
7331                }
7332#endif
7333
7334                /* limit to 1 bucket per 2^scale bytes of low memory */
7335                if (scale > PAGE_SHIFT)
7336                        numentries >>= (scale - PAGE_SHIFT);
7337                else
7338                        numentries <<= (PAGE_SHIFT - scale);
7339
7340                /* Make sure we've got at least a 0-order allocation.. */
7341                if (unlikely(flags & HASH_SMALL)) {
7342                        /* Makes no sense without HASH_EARLY */
7343                        WARN_ON(!(flags & HASH_EARLY));
7344                        if (!(numentries >> *_hash_shift)) {
7345                                numentries = 1UL << *_hash_shift;
7346                                BUG_ON(!numentries);
7347                        }
7348                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7349                        numentries = PAGE_SIZE / bucketsize;
7350        }
7351        numentries = roundup_pow_of_two(numentries);
7352
7353        /* limit allocation size to 1/16 total memory by default */
7354        if (max == 0) {
7355                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7356                do_div(max, bucketsize);
7357        }
7358        max = min(max, 0x80000000ULL);
7359
7360        if (numentries < low_limit)
7361                numentries = low_limit;
7362        if (numentries > max)
7363                numentries = max;
7364
7365        log2qty = ilog2(numentries);
7366
7367        /*
7368         * memblock allocator returns zeroed memory already, so HASH_ZERO is
7369         * currently not used when HASH_EARLY is specified.
7370         */
7371        gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
7372        do {
7373                size = bucketsize << log2qty;
7374                if (flags & HASH_EARLY)
7375                        table = memblock_virt_alloc_nopanic(size, 0);
7376                else if (hashdist)
7377                        table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
7378                else {
7379                        /*
7380                         * If bucketsize is not a power of two, we may free
7381                         * some pages at the end of the hash table, which
7382                         * alloc_pages_exact() does automatically.
7383                         */
7384                        if (get_order(size) < MAX_ORDER) {
7385                                table = alloc_pages_exact(size, gfp_flags);
7386                                kmemleak_alloc(table, size, 1, gfp_flags);
7387                        }
7388                }
7389        } while (!table && size > PAGE_SIZE && --log2qty);
7390
7391        if (!table)
7392                panic("Failed to allocate %s hash table\n", tablename);
7393
7394        pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7395                tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7396
7397        if (_hash_shift)
7398                *_hash_shift = log2qty;
7399        if (_hash_mask)
7400                *_hash_mask = (1 << log2qty) - 1;
7401
7402        return table;
7403}
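/*
 * Sketch of a typical caller (for illustration only; the exact arguments
 * used by fs/dcache.c and other callers vary between kernel versions):
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *						   sizeof(struct hlist_bl_head),
 *						   dhash_entries,
 *						   13,
 *						   HASH_EARLY | HASH_ZERO,
 *						   &d_hash_shift,
 *						   &d_hash_mask,
 *						   0,
 *						   0);
 *
 * i.e. scale = 13 asks for one bucket per 8 KiB of memory, HASH_EARLY makes
 * the table come from memblock, and the resulting shift and mask are
 * returned through the last pointer arguments.
 */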
7404
7405/*
7406 * This function checks whether the pageblock includes unmovable pages or not.
7407 * If @count is not zero, it is okay to include fewer than @count unmovable pages.
7408 *
7409 * A PageLRU check without isolation or the lru_lock could race, so a
7410 * MIGRATE_MOVABLE block might include unmovable pages. Likewise, a __PageMovable
7411 * check without lock_page may miss some movable non-LRU pages when racing,
7412 * so this function cannot be expected to be exact.
7413 */
7414bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7415                         bool skip_hwpoisoned_pages)
7416{
7417        unsigned long pfn, iter, found;
7418        int mt;
7419
7420        /*
7421         * To avoid noisy data, lru_add_drain_all() should be called first.
7422         * A ZONE_MOVABLE zone never contains unmovable pages.
7423         */
7424        if (zone_idx(zone) == ZONE_MOVABLE)
7425                return false;
7426        mt = get_pageblock_migratetype(page);
7427        if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
7428                return false;
7429
7430        pfn = page_to_pfn(page);
7431        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7432                unsigned long check = pfn + iter;
7433
7434                if (!pfn_valid_within(check))
7435                        continue;
7436
7437                page = pfn_to_page(check);
7438
7439                /*
7440                 * Hugepages are not in LRU lists, but they're movable.
7441                 * We need not scan over tail pages because we don't
7442                 * handle each tail page individually in migration.
7443                 */
7444                if (PageHuge(page)) {
7445                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7446                        continue;
7447                }
7448
7449                /*
7450                 * We can't use page_count without pinning the page
7451                 * because another CPU can free the compound page.
7452                 * This check already skips compound tails of THP
7453                 * because their page->_refcount is zero at all times.
7454                 */
7455                if (!page_ref_count(page)) {
7456                        if (PageBuddy(page))
7457                                iter += (1 << page_order(page)) - 1;
7458                        continue;
7459                }
7460
7461                /*
7462                 * The HWPoisoned page may not be in the buddy system, and
7463                 * its page_count() is not 0.
7464                 */
7465                if (skip_hwpoisoned_pages && PageHWPoison(page))
7466                        continue;
7467
7468                if (__PageMovable(page))
7469                        continue;
7470
7471                if (!PageLRU(page))
7472                        found++;
7473                /*
7474                 * If there are RECLAIMABLE pages, we need to check
7475                 * them.  But for now, memory offlining itself doesn't call
7476                 * shrink_node_slabs(); this still needs to be fixed.
7477                 */
7478                /*
7479                 * If the page is not RAM, page_count() should be 0;
7480                 * we don't need further checks. This is a _used_ non-movable page.
7481                 *
7482                 * The problematic thing here is PG_reserved pages. PG_reserved
7483                 * is set on both memory hole pages and _used_ kernel
7484                 * pages at boot.
7485                 */
7486                if (found > count)
7487                        return true;
7488        }
7489        return false;
7490}
7491
7492bool is_pageblock_removable_nolock(struct page *page)
7493{
7494        struct zone *zone;
7495        unsigned long pfn;
7496
7497        /*
7498         * We have to be careful here because we are iterating over memory
7499         * sections which are not zone aware so we might end up outside of
7500         * the zone but still within the section.
7501         * We also have to take care of the node: if the node is offline,
7502         * its NODE_DATA will be NULL - see page_zone().
7503         */
7504        if (!node_online(page_to_nid(page)))
7505                return false;
7506
7507        zone = page_zone(page);
7508        pfn = page_to_pfn(page);
7509        if (!zone_spans_pfn(zone, pfn))
7510                return false;
7511
7512        return !has_unmovable_pages(zone, page, 0, true);
7513}
7514
7515#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
7516
7517static unsigned long pfn_max_align_down(unsigned long pfn)
7518{
7519        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
7520                             pageblock_nr_pages) - 1);
7521}
7522
7523static unsigned long pfn_max_align_up(unsigned long pfn)
7524{
7525        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
7526                                pageblock_nr_pages));
7527}
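/*
 * Worked example (illustrative, assuming MAX_ORDER_NR_PAGES = 1024 and
 * pageblock_nr_pages = 512, as on x86-64 with 4 KiB pages): the alignment
 * used is max(1024, 512) = 1024 pfns, so pfn_max_align_down(5000) = 4096
 * and pfn_max_align_up(5000) = 5120.
 */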
7528
7529/* [start, end) must belong to a single zone. */
7530static int __alloc_contig_migrate_range(struct compact_control *cc,
7531                                        unsigned long start, unsigned long end)
7532{
7533        /* This function is based on compact_zone() from compaction.c. */
7534        unsigned long nr_reclaimed;
7535        unsigned long pfn = start;
7536        unsigned int tries = 0;
7537        int ret = 0;
7538
7539        migrate_prep();
7540
7541        while (pfn < end || !list_empty(&cc->migratepages)) {
7542                if (fatal_signal_pending(current)) {
7543                        ret = -EINTR;
7544                        break;
7545                }
7546
7547                if (list_empty(&cc->migratepages)) {
7548                        cc->nr_migratepages = 0;
7549                        pfn = isolate_migratepages_range(cc, pfn, end);
7550                        if (!pfn) {
7551                                ret = -EINTR;
7552                                break;
7553                        }
7554                        tries = 0;
7555                } else if (++tries == 5) {
7556                        ret = ret < 0 ? ret : -EBUSY;
7557                        break;
7558                }
7559
7560                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
7561                                                        &cc->migratepages);
7562                cc->nr_migratepages -= nr_reclaimed;
7563
7564                ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
7565                                    NULL, 0, cc->mode, MR_CMA);
7566        }
7567        if (ret < 0) {
7568                putback_movable_pages(&cc->migratepages);
7569                return ret;
7570        }
7571        return 0;
7572}
7573
7574/**
7575 * alloc_contig_range() -- tries to allocate given range of pages
7576 * @start:      start PFN to allocate
7577 * @end:        one-past-the-last PFN to allocate
7578 * @migratetype:        migratetype of the underlying pageblocks (either
7579 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
7580 *                      in range must have the same migratetype and it must
7581 *                      be either of the two.
7582 * @gfp_mask:   GFP mask to use during compaction
7583 *
7584 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
7585 * aligned, however it's the caller's responsibility to guarantee that
7586 * we are the only thread that changes migrate type of pageblocks the
7587 * pages fall in.
7588 *
7589 * The PFN range must belong to a single zone.
7590 *
7591 * Returns zero on success or a negative error code.  On success all
7592 * pages whose PFN is in [start, end) are allocated for the caller and
7593 * need to be freed with free_contig_range().
7594 */
7595int alloc_contig_range(unsigned long start, unsigned long end,
7596                       unsigned migratetype, gfp_t gfp_mask)
7597{
7598        unsigned long outer_start, outer_end;
7599        unsigned int order;
7600        int ret = 0;
7601
7602        struct compact_control cc = {
7603                .nr_migratepages = 0,
7604                .order = -1,
7605                .zone = page_zone(pfn_to_page(start)),
7606                .mode = MIGRATE_SYNC,
7607                .ignore_skip_hint = true,
7608                .gfp_mask = current_gfp_context(gfp_mask),
7609        };
7610        INIT_LIST_HEAD(&cc.migratepages);
7611
7612        /*
7613         * What we do here is mark all pageblocks in the range as
7614         * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
7615         * have different sizes, and because of the way the page allocator
7616         * works, we align the range to the bigger of the two so
7617         * that the page allocator won't try to merge buddies from
7618         * different pageblocks and change MIGRATE_ISOLATE to some
7619         * other migration type.
7620         *
7621         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
7622         * migrate the pages from the unaligned range (i.e. the pages
7623         * we are interested in).  This puts all the pages in the
7624         * range back into the page allocator as MIGRATE_ISOLATE.
7625         *
7626         * When this is done, we take the pages in the range from the
7627         * page allocator, removing them from the buddy system.  This way
7628         * the page allocator will never consider using them.
7629         *
7630         * This lets us mark the pageblocks back as
7631         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
7632         * aligned range but not in the unaligned, original range are
7633         * put back to the page allocator so that the buddy allocator can use them.
7634         */
7635
7636        ret = start_isolate_page_range(pfn_max_align_down(start),
7637                                       pfn_max_align_up(end), migratetype,
7638                                       false);
7639        if (ret)
7640                return ret;
7641
7642        /*
7643         * In case of -EBUSY, we'd like to know which page causes problem.
7644         * So, just fall through. We will check it in test_pages_isolated().
7645         */
7646        ret = __alloc_contig_migrate_range(&cc, start, end);
7647        if (ret && ret != -EBUSY)
7648                goto done;
7649
7650        /*
7651         * Pages from [start, end) are within MAX_ORDER_NR_PAGES-
7652         * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
7653         * more, all pages in [start, end) are free in the page allocator.
7654         * What we are going to do is allocate all pages from
7655         * [start, end) (that is, remove them from the page allocator).
7656         *
7657         * The only problem is that pages at the beginning and at the
7658         * end of the interesting range may not be aligned with pages that
7659         * the page allocator holds, i.e. they can be part of higher-order
7660         * pages.  Because of this, we reserve the bigger range and,
7661         * once this is done, free the pages we are not interested in.
7662         *
7663         * We don't have to hold zone->lock here because the pages are
7664         * isolated and thus won't get removed from the buddy system.
7665         */
7666
7667        lru_add_drain_all();
7668        drain_all_pages(cc.zone);
7669
7670        order = 0;
7671        outer_start = start;
7672        while (!PageBuddy(pfn_to_page(outer_start))) {
7673                if (++order >= MAX_ORDER) {
7674                        outer_start = start;
7675                        break;
7676                }
7677                outer_start &= ~0UL << order;
7678        }
7679
7680        if (outer_start != start) {
7681                order = page_order(pfn_to_page(outer_start));
7682
7683                /*
7684                 * The outer_start page could be a small-order buddy page that
7685                 * doesn't include the start page. Adjust outer_start
7686                 * in this case so that the failed page is reported properly
7687                 * by the tracepoint in test_pages_isolated().
7688                 */
7689                if (outer_start + (1UL << order) <= start)
7690                        outer_start = start;
7691        }
7692
7693        /* Make sure the range is really isolated. */
7694        if (test_pages_isolated(outer_start, end, false)) {
7695                pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
7696                        __func__, outer_start, end);
7697                ret = -EBUSY;
7698                goto done;
7699        }
7700
7701        /* Grab isolated pages from freelists. */
7702        outer_end = isolate_freepages_range(&cc, outer_start, end);
7703        if (!outer_end) {
7704                ret = -EBUSY;
7705                goto done;
7706        }
7707
7708        /* Free head and tail (if any) */
7709        if (start != outer_start)
7710                free_contig_range(outer_start, start - outer_start);
7711        if (end != outer_end)
7712                free_contig_range(end, outer_end - end);
7713
7714done:
7715        undo_isolate_page_range(pfn_max_align_down(start),
7716                                pfn_max_align_up(end), migratetype);
7717        return ret;
7718}
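/*
 * Illustrative usage (a sketch, not taken from this file): a CMA-style
 * caller allocates a physically contiguous PFN range and later releases it:
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA, GFP_KERNEL);
 *	if (!ret) {
 *		struct page *first = pfn_to_page(pfn);
 *
 *		... use the nr_pages contiguous pages starting at first ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 */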
7719
7720void free_contig_range(unsigned long pfn, unsigned nr_pages)
7721{
7722        unsigned int count = 0;
7723
7724        for (; nr_pages--; pfn++) {
7725                struct page *page = pfn_to_page(pfn);
7726
7727                count += page_count(page) != 1;
7728                __free_page(page);
7729        }
7730        WARN(count != 0, "%d pages are still in use!\n", count);
7731}
7732#endif
7733
7734#ifdef CONFIG_MEMORY_HOTPLUG
7735/*
7736 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7737 * page high values need to be recalculated.
7738 */
7739void __meminit zone_pcp_update(struct zone *zone)
7740{
7741        unsigned cpu;
7742        mutex_lock(&pcp_batch_high_lock);
7743        for_each_possible_cpu(cpu)
7744                pageset_set_high_and_batch(zone,
7745                                per_cpu_ptr(zone->pageset, cpu));
7746        mutex_unlock(&pcp_batch_high_lock);
7747}
7748#endif
7749
7750void zone_pcp_reset(struct zone *zone)
7751{
7752        unsigned long flags;
7753        int cpu;
7754        struct per_cpu_pageset *pset;
7755
7756        /* avoid races with drain_pages()  */
7757        local_irq_save(flags);
7758        if (zone->pageset != &boot_pageset) {
7759                for_each_online_cpu(cpu) {
7760                        pset = per_cpu_ptr(zone->pageset, cpu);
7761                        drain_zonestat(zone, pset);
7762                }
7763                free_percpu(zone->pageset);
7764                zone->pageset = &boot_pageset;
7765        }
7766        local_irq_restore(flags);
7767}
7768
7769#ifdef CONFIG_MEMORY_HOTREMOVE
7770/*
7771 * All pages in the range must be in a single zone and isolated
7772 * before calling this.
7773 */
7774void
7775__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
7776{
7777        struct page *page;
7778        struct zone *zone;
7779        unsigned int order, i;
7780        unsigned long pfn;
7781        unsigned long flags;
7782        /* find the first valid pfn */
7783        for (pfn = start_pfn; pfn < end_pfn; pfn++)
7784                if (pfn_valid(pfn))
7785                        break;
7786        if (pfn == end_pfn)
7787                return;
7788        offline_mem_sections(pfn, end_pfn);
7789        zone = page_zone(pfn_to_page(pfn));
7790        spin_lock_irqsave(&zone->lock, flags);
7791        pfn = start_pfn;
7792        while (pfn < end_pfn) {
7793                if (!pfn_valid(pfn)) {
7794                        pfn++;
7795                        continue;
7796                }
7797                page = pfn_to_page(pfn);
7798                /*
7799                 * The HWPoisoned page may not be in the buddy system, and
7800                 * its page_count() is not 0.
7801                 */
7802                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
7803                        pfn++;
7804                        SetPageReserved(page);
7805                        continue;
7806                }
7807
7808                BUG_ON(page_count(page));
7809                BUG_ON(!PageBuddy(page));
7810                order = page_order(page);
7811#ifdef CONFIG_DEBUG_VM
7812                pr_info("remove from free list %lx %d %lx\n",
7813                        pfn, 1 << order, end_pfn);
7814#endif
7815                list_del(&page->lru);
7816                rmv_page_order(page);
7817                zone->free_area[order].nr_free--;
7818                for (i = 0; i < (1 << order); i++)
7819                        SetPageReserved((page+i));
7820                pfn += (1 << order);
7821        }
7822        spin_unlock_irqrestore(&zone->lock, flags);
7823}
7824#endif
7825
7826bool is_free_buddy_page(struct page *page)
7827{
7828        struct zone *zone = page_zone(page);
7829        unsigned long pfn = page_to_pfn(page);
7830        unsigned long flags;
7831        unsigned int order;
7832
7833        spin_lock_irqsave(&zone->lock, flags);
7834        for (order = 0; order < MAX_ORDER; order++) {
7835                struct page *page_head = page - (pfn & ((1 << order) - 1));
7836
7837                if (PageBuddy(page_head) && page_order(page_head) >= order)
7838                        break;
7839        }
7840        spin_unlock_irqrestore(&zone->lock, flags);
7841
7842        return order < MAX_ORDER;
7843}
7844