/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION    (8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
        struct zone *zone;
        struct work_struct work;
};
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_MEMORY] = { { [0] = 1UL } },
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
        return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
        page->index = migratetype;
}
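
/*
 * Illustrative example (not from the original source): when a page from a
 * CMA pageblock is freed to a pcplist, only the UNMOVABLE/MOVABLE/
 * RECLAIMABLE lists exist per cpu, so the page is queued on the
 * MIGRATE_MOVABLE list while set_pcppage_migratetype(page, MIGRATE_CMA)
 * remembers its real type; when the pcplist is later drained back to the
 * buddy lists, get_pcppage_migratetype() routes the page to the CMA
 * freelist.
 */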

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&system_transition_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&system_transition_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                return false;
        return true;
}
#endif /* CONFIG_PM_SLEEP */
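
/*
 * Illustrative call sequence (simplified; the actual callers live in the
 * suspend/hibernate core under kernel/power):
 *
 *      mutex_lock(&system_transition_mutex);
 *      pm_restrict_gfp_mask();    - allocations may no longer start I/O
 *      (suspend devices, enter the sleep state, resume devices)
 *      pm_restore_gfp_mask();     - GFP_KERNEL behaves normally again
 *      mutex_unlock(&system_transition_mutex);
 */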

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        [ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
        [ZONE_DMA32] = 256,
#endif
        [ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
        [ZONE_HIGHMEM] = 0,
#endif
        [ZONE_MOVABLE] = 0,
};
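
/*
 * Worked example (illustrative, matching the comment above): with ratio
 * 256 for ZONE_DMA on the second 1G layout, a ZONE_NORMAL allocation
 * falling back to ZONE_DMA must leave about 784M/256 ~= 3M of DMA memory
 * free; with ratio 32 for ZONE_NORMAL, a HIGHMEM allocation must leave
 * 224M/32 = 7M of normal memory free.  A larger ratio means a smaller
 * reservation.
 */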
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
#ifdef CONFIG_ZONE_DEVICE
         "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Movable",
        "Reclaimable",
        "HighAtomic",
#ifdef CONFIG_CMA
        "CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        "Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
        NULL,
        free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
        free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Call kasan_free_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problems on large memory systems, as the
 * deferred pages initialization is done with interrupts disabled.
 *
 * Assuming that there will be no references to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order)
{
        if (!static_branch_unlikely(&deferred_pages))
                kasan_free_pages(page, order);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
        int nid = early_pfn_to_nid(pfn);

        if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;

        return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
        static unsigned long prev_end_pfn, nr_initialised;

        /*
         * The prev_end_pfn static holds the end of the previous zone. No
         * need to protect it: this is called very early in boot, before
         * smp_init().
         */
        if (prev_end_pfn != end_pfn) {
                prev_end_pfn = end_pfn;
                nr_initialised = 0;
        }

        /* Always populate low zones for address-constrained allocations */
        if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
                return false;

        /*
         * We start only with one section of pages, more pages are added as
         * needed until the rest of deferred pages are initialized.
         */
        nr_initialised++;
        if ((nr_initialised > PAGES_PER_SECTION) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                NODE_DATA(nid)->first_deferred_pfn = pfn;
                return true;
        }
        return false;
}
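
/*
 * Example (illustrative): with PAGES_PER_SECTION == 32768 (x86-64), the
 * first 32768 pages of the node's highest zone are initialised eagerly.
 * Once nr_initialised exceeds that, the next section-aligned pfn is
 * recorded in first_deferred_pfn and every pfn above it is left for
 * deferred_init_memmap() to initialise in parallel later in boot.
 */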
#else
#define kasan_free_nondeferred_pages(p, o)      kasan_free_pages(p, o)

static inline bool early_page_uninitialised(unsigned long pfn)
{
        return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
        return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
                                                        unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        return __pfn_to_section(pfn)->pageblock_flags;
#else
        return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
        pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
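
/*
 * Worked example (illustrative, SPARSEMEM with pageblock_order == 9 and
 * PAGES_PER_SECTION == 32768): for pfn 0x12345, pfn & 0x7fff == 0x2345,
 * and 0x2345 >> 9 == 17, so the block's flags start at bit index
 * 17 * NR_PAGEBLOCK_BITS == 68 of the section's pageblock_flags bitmap,
 * i.e. at offset 4 within word 1.
 */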

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long word;

        bitmap = get_pageblock_bitmap(page, pfn);
        bitidx = pfn_to_bitidx(page, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        word = bitmap[word_bitidx];
        bitidx += end_bitidx;
        return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
        return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long old_word, word;

        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
        BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

        bitmap = get_pageblock_bitmap(page, pfn);
        bitidx = pfn_to_bitidx(page, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

        bitidx += end_bitidx;
        mask <<= (BITS_PER_LONG - bitidx - 1);
        flags <<= (BITS_PER_LONG - bitidx - 1);

        word = READ_ONCE(bitmap[word_bitidx]);
        for (;;) {
                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
                if (word == old_word)
                        break;
                word = old_word;
        }
}
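
/*
 * The cmpxchg() loop above is a lock-free read-modify-write: the new word
 * is recomputed from the latest snapshot until no other updater races
 * with us.  Worked example (illustrative, 64-bit): for the first
 * pageblock of a word (bitidx 0) and the migratetype bits (end_bitidx ==
 * PB_migrate_end == 2, mask == MIGRATETYPE_MASK), both shifts are by
 * 64 - 2 - 1 == 61, placing the three migratetype bits in bits 63..61 of
 * the word, exactly where __get_pfnblock_flags_mask() reads them back.
 */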

void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled &&
                     migratetype < MIGRATE_PCPTYPES))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);
        unsigned long sp, start_pfn;

        do {
                seq = zone_span_seqbegin(zone);
                start_pfn = zone->zone_start_pfn;
                sp = zone->spanned_pages;
                if (!zone_spans_pfn(zone, pfn))
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        if (ret)
                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
                        pfn, zone_to_nid(zone), zone->name,
                        start_pfn, start_pfn + sp);

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
                unsigned long bad_flags)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        pr_alert(
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        __dump_page(page, reason);
        bad_flags &= page->flags;
        if (bad_flags)
                pr_alert("bad because of flags: %#lx(%pGp)\n",
                                                bad_flags, &bad_flags);
        dump_page_owner(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        page_mapcount_reset(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
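
/*
 * Worked example (illustrative): prep_compound_page(page, 2) on the page
 * at pfn 100 sets PG_head on page 100 and, for pages 101-103, stores the
 * address of page 100 with bit 0 set in ->compound_head, so PageTail()
 * is true and compound_head() recovers page 100.  Page 101, the first
 * tail, also carries ->compound_dtor and ->compound_order == 2 for the
 * whole four-page unit.
 */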

void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                set_page_count(p, 0);
                p->mapping = TAIL_MAPPING;
                set_compound_head(p, page);
        }
        atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
        if (!buf)
                return -EINVAL;
        return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
        /* If we don't use debug_pagealloc, we don't need guard page */
        if (!debug_pagealloc_enabled())
                return false;

        if (!debug_guardpage_minorder())
                return false;

        return true;
}

static void init_debug_guardpage(void)
{
        if (!debug_pagealloc_enabled())
                return;

        if (!debug_guardpage_minorder())
                return;

        _debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
        .need = need_debug_guardpage,
        .init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
        unsigned long res;

        if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
                pr_err("Bad debug_guardpage_minorder value\n");
                return 0;
        }
        _debug_guardpage_minorder = res;
        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return false;

        if (order >= debug_guardpage_minorder())
                return false;

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return false;

        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        INIT_LIST_HEAD(&page->lru);
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);

        return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;

        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        set_page_private(page, 0);
        if (!is_migrate_isolate(migratetype))
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
                        unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                        unsigned int order)
{
        if (page_is_guard(buddy) && page_order(buddy) == order) {
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                /*
                 * zone check is done late to avoid uselessly
                 * calculating zone/node ids for pages that could
                 * never merge.
                 */
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PageBuddy.
 * A page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
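
/*
 * Worked example (illustrative): freeing an order-1 page at pfn 10.
 * __find_buddy_pfn(10, 1) == 10 ^ (1 << 1) == 8; if the page at pfn 8 is
 * PageBuddy with order 1 in the same zone, the pair merges at
 * combined_pfn == (8 & 10) == 8 as an order-2 block, whose buddy is then
 * 8 ^ 4 == 12, and so on until a buddy is busy or max_order is reached.
 */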

static inline void __free_one_page(struct page *page,
                unsigned long pfn,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long combined_pfn;
        unsigned long uninitialized_var(buddy_pfn);
        struct page *buddy;
        unsigned int max_order;

        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

        VM_BUG_ON(!zone_is_initialized(zone));
        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

        VM_BUG_ON(migratetype == -1);
        if (likely(!is_migrate_isolate(migratetype)))
                __mod_zone_freepage_state(zone, 1 << order, migratetype);

        VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
        while (order < max_order - 1) {
                buddy_pfn = __find_buddy_pfn(pfn, order);
                buddy = page + (buddy_pfn - pfn);

                if (!pfn_valid_within(buddy_pfn))
                        goto done_merging;
                if (!page_is_buddy(page, buddy, order))
                        goto done_merging;
                /*
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard(zone, buddy, order, migratetype);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
                }
                combined_pfn = buddy_pfn & pfn;
                page = page + (combined_pfn - pfn);
                pfn = combined_pfn;
                order++;
        }
        if (max_order < MAX_ORDER) {
                /*
                 * If we are here, it means order is >= pageblock_order.
                 * We want to prevent merge between freepages on isolate
                 * pageblock and normal pageblock. Without this, pageblock
                 * isolation could cause incorrect freepage or CMA accounting.
                 *
                 * We don't want to hit this code for the more frequent
                 * low-order merging.
                 */
                if (unlikely(has_isolate_pageblock(zone))) {
                        int buddy_mt;

                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);
                        buddy_mt = get_pageblock_migratetype(buddy);

                        if (migratetype != buddy_mt
                                        && (is_migrate_isolate(migratetype) ||
                                                is_migrate_isolate(buddy_mt)))
                                goto done_merging;
                }
                max_order++;
                goto continue_merging;
        }

done_merging:
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
                struct page *higher_page, *higher_buddy;
                combined_pfn = buddy_pfn & pfn;
                higher_page = page + (combined_pfn - pfn);
                buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
                higher_buddy = higher_page + (buddy_pfn - combined_pfn);
                if (pfn_valid_within(buddy_pfn) &&
                    page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
                                        unsigned long check_flags)
{
        if (unlikely(atomic_read(&page->_mapcount) != -1))
                return false;

        if (unlikely((unsigned long)page->mapping |
                        page_ref_count(page) |
#ifdef CONFIG_MEMCG
                        (unsigned long)page->mem_cgroup |
#endif
                        (page->flags & check_flags)))
                return false;

        return true;
}

static void free_pages_check_bad(struct page *page)
{
        const char *bad_reason;
        unsigned long bad_flags;

        bad_reason = NULL;
        bad_flags = 0;

        if (unlikely(atomic_read(&page->_mapcount) != -1))
                bad_reason = "nonzero mapcount";
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(page_ref_count(page) != 0))
                bad_reason = "nonzero _refcount";
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
                bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
        }
#ifdef CONFIG_MEMCG
        if (unlikely(page->mem_cgroup))
                bad_reason = "page still charged to cgroup";
#endif
        bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
        if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
                return 0;

        /* Something has gone sideways, find it */
        free_pages_check_bad(page);
        return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
        int ret = 1;

        /*
         * We rely on page->lru.next never having bit 0 set, unless the page
         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
         */
        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
                ret = 0;
                goto out;
        }
        switch (page - head_page) {
        case 1:
                /* the first tail page: ->mapping may be compound_mapcount() */
                if (unlikely(compound_mapcount(page))) {
                        bad_page(page, "nonzero compound_mapcount", 0);
                        goto out;
                }
                break;
        case 2:
                /*
                 * the second tail page: ->mapping is
                 * deferred_list.next -- ignore value.
                 */
                break;
        default:
                if (page->mapping != TAIL_MAPPING) {
                        bad_page(page, "corrupted mapping in tail page", 0);
                        goto out;
                }
                break;
        }
        if (unlikely(!PageTail(page))) {
                bad_page(page, "PageTail not set", 0);
                goto out;
        }
        if (unlikely(compound_head(page) != head_page)) {
                bad_page(page, "compound_head not consistent", 0);
                goto out;
        }
        ret = 0;
out:
        page->mapping = NULL;
        clear_compound_head(page);
        return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
                                        unsigned int order, bool check_free)
{
        int bad = 0;

        VM_BUG_ON_PAGE(PageTail(page), page);

        trace_mm_page_free(page, order);

        /*
         * Check tail pages before head page information is cleared to
         * avoid checking PageCompound for order-0 pages.
         */
        if (unlikely(order)) {
                bool compound = PageCompound(page);
                int i;

                VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

                if (compound)
                        ClearPageDoubleMap(page);
                for (i = 1; i < (1 << order); i++) {
                        if (compound)
                                bad += free_tail_pages_check(page, page + i);
                        if (unlikely(free_pages_check(page + i))) {
                                bad++;
                                continue;
                        }
                        (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
                }
        }
        if (PageMappingFlags(page))
                page->mapping = NULL;
        if (memcg_kmem_enabled() && PageKmemcg(page))
                memcg_kmem_uncharge(page, order);
        if (check_free)
                bad += free_pages_check(page);
        if (bad)
                return false;

        page_cpupid_reset_last(page);
        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        reset_page_owner(page, order);

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_poison_pages(page, 1 << order, 0);
        kernel_map_pages(page, 1 << order, 0);
        kasan_free_nondeferred_pages(page, order);

        return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
        return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
        return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
        return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
        return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

static inline void prefetch_buddy(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
        struct page *buddy = page + (buddy_pfn - pfn);

        prefetch(buddy);
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int prefetch_nr = 0;
        bool isolated_pageblocks;
        struct page *page, *tmp;
        LIST_HEAD(head);

        while (count) {
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = count;

                do {
                        page = list_last_entry(list, struct page, lru);
                        /* must delete to avoid corrupting pcp list */
                        list_del(&page->lru);
                        pcp->count--;

                        if (bulkfree_pcp_prepare(page))
                                continue;

                        list_add_tail(&page->lru, &head);

                        /*
                         * We are going to put the page back to the global
                         * pool, prefetch its buddy to speed up later access
                         * under zone->lock. It is believed the overhead of
                         * an additional test and calculating buddy_pfn here
                         * can be offset by reduced memory latency later. To
                         * avoid excessive prefetching due to large count, only
                         * prefetch buddy for the first pcp->batch nr of pages.
                         */
                        if (prefetch_nr++ < pcp->batch)
                                prefetch_buddy(page);
                } while (--count && --batch_free && !list_empty(list));
        }

        spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);

        /*
         * Use the safe version since after __free_one_page(),
         * page->lru.next will not point to the original list.
         */
        list_for_each_entry_safe(page, tmp, &head, lru) {
                int mt = get_pcppage_migratetype(page);
                /* MIGRATE_ISOLATE page should not go to pcplists */
                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
                /* Pageblock could have been isolated meanwhile */
                if (unlikely(isolated_pageblocks))
                        mt = get_pageblock_migratetype(page);

                __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                trace_mm_page_pcpu_drain(page, 0, mt);
        }
        spin_unlock(&zone->lock);
}
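
/*
 * Walkthrough (illustrative): each round of the outer loop advances to
 * the next pcplist, bumping batch_free once per list it touches, so
 * landing directly on a non-empty list typically frees one page, while
 * skipping an empty list on the way builds a budget of two, and so on.
 * Over many rounds the fuller lists therefore drain faster than the
 * nearly empty ones, without any per-list bookkeeping.
 */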

static void free_one_page(struct zone *zone,
                                struct page *page, unsigned long pfn,
                                unsigned int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        if (unlikely(has_isolate_pageblock(zone) ||
                is_migrate_isolate(migratetype))) {
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
        spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                unsigned long zone, int nid)
{
        mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
        page_cpupid_reset_last(page);
        page_kasan_tag_reset(page);

        INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
        if (!is_highmem_idx(zone))
                set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
        pg_data_t *pgdat;
        int nid, zid;

        if (!early_page_uninitialised(pfn))
                return;

        nid = early_pfn_to_nid(pfn);
        pgdat = NODE_DATA(nid);

        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
        __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long end_pfn = PFN_UP(end);

        for (; start_pfn < end_pfn; start_pfn++) {
                if (pfn_valid(start_pfn)) {
                        struct page *page = pfn_to_page(start_pfn);

                        init_reserved_page(start_pfn);

                        /* Avoid false-positive PageTail() */
                        INIT_LIST_HEAD(&page->lru);

                        /*
                         * no need for atomic set_bit because the struct
                         * page is not visible yet so nobody should
                         * access it yet.
                         */
                        __SetPageReserved(page);
                }
        }
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);

        if (!free_pages_prepare(page, order, true))
                return;

        migratetype = get_pfnblock_migratetype(page, pfn);
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
        local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
        unsigned int nr_pages = 1 << order;
        struct page *p = page;
        unsigned int loop;

        prefetchw(p);
        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
                prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }
        __ClearPageReserved(p);
        set_page_count(p, 0);

        atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
        set_page_refcounted(page);
        __free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
        static DEFINE_SPINLOCK(early_pfn_lock);
        int nid;

        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
                nid = first_online_node;
        spin_unlock(&early_pfn_lock);

        return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
                   struct mminit_pfnnid_cache *state)
{
        int nid;

        nid = __early_pfn_to_nid(pfn, state);
        if (nid >= 0 && nid != node)
                return false;
        return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return true;
}
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
                   struct mminit_pfnnid_cache *state)
{
        return true;
}
#endif

void __init memblock_free_pages(struct page *page, unsigned long pfn,
                                                        unsigned int order)
{
        if (early_page_uninitialised(pfn))
                return;
        return __free_pages_boot_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only the pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
                                     unsigned long end_pfn, struct zone *zone)
{
        struct page *start_page;
        struct page *end_page;

        /* end_pfn is one past the range we are checking */
        end_pfn--;

        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
                return NULL;

        start_page = pfn_to_online_page(start_pfn);
        if (!start_page)
                return NULL;

        if (page_zone(start_page) != zone)
                return NULL;

        end_page = pfn_to_page(end_pfn);

        /* This gives a shorter code than deriving page_zone(end_page) */
        if (page_zone_id(start_page) != page_zone_id(end_page))
                return NULL;

        return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
        unsigned long block_start_pfn = zone->zone_start_pfn;
        unsigned long block_end_pfn;

        block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
        for (; block_start_pfn < zone_end_pfn(zone);
                        block_start_pfn = block_end_pfn,
                        block_end_pfn += pageblock_nr_pages) {

                block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

                if (!__pageblock_pfn_to_page(block_start_pfn,
                                             block_end_pfn, zone))
                        return;
        }

        /* We confirm that there is no hole */
        zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
        zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
                                       unsigned long nr_pages)
{
        struct page *page;
        unsigned long i;

        if (!nr_pages)
                return;

        page = pfn_to_page(pfn);

        /* Free a large naturally-aligned chunk if possible */
        if (nr_pages == pageblock_nr_pages &&
            (pfn & (pageblock_nr_pages - 1)) == 0) {
                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, pageblock_order);
                return;
        }

        for (i = 0; i < nr_pages; i++, page++, pfn++) {
                if ((pfn & (pageblock_nr_pages - 1)) == 0)
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, 0);
        }
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
        if (atomic_dec_and_test(&pgdat_init_n_undone))
                complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if the page needs to be initialized or freed to the buddy
 * allocator.
 *
 * First we check if the pfn is valid on architectures where it is possible
 * to have holes within pageblock_nr_pages. On systems where it is not
 * possible, this function is optimized out.
 *
 * Then, we check if the current large page is valid by only checking the
 * validity of the head pfn.
 *
 * Finally, meminit_pfn_in_nid is checked on systems where pfns can
 * interleave within a node: a pfn can lie between the start and end of a
 * node yet not belong to that memory node.
 */
static inline bool __init
deferred_pfn_valid(int nid, unsigned long pfn,
                   struct mminit_pfnnid_cache *nid_init_state)
{
        if (!pfn_valid_within(pfn))
                return false;
        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
                return false;
        if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
                return false;
        return true;
}
1522
1523/*
1524 * Free pages to buddy allocator. Try to free aligned pages in
1525 * pageblock_nr_pages sizes.
1526 */
1527static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
1528                                       unsigned long end_pfn)
1529{
1530        struct mminit_pfnnid_cache nid_init_state = { };
1531        unsigned long nr_pgmask = pageblock_nr_pages - 1;
1532        unsigned long nr_free = 0;
1533
1534        for (; pfn < end_pfn; pfn++) {
1535                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
1536                        deferred_free_range(pfn - nr_free, nr_free);
1537                        nr_free = 0;
1538                } else if (!(pfn & nr_pgmask)) {
1539                        deferred_free_range(pfn - nr_free, nr_free);
1540                        nr_free = 1;
1541                        touch_nmi_watchdog();
1542                } else {
1543                        nr_free++;
1544                }
1545        }
1546        /* Free the last block of pages to allocator */
1547        deferred_free_range(pfn - nr_free, nr_free);
1548}
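/*
 * For example (assuming pageblock_nr_pages == 512): a run of valid pfns is
 * accumulated in nr_free and flushed whenever an invalid pfn is hit or a
 * pageblock boundary is crossed, so a completely valid, aligned pageblock
 * reaches deferred_free_range() as one 512-page chunk and can be freed as a
 * single high-order page rather than as 512 order-0 pages.
 */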
1549
1550/*
1551 * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1552 * by performing them only once every pageblock_nr_pages.
1553 * Return number of pages initialized.
1554 */
1555static unsigned long  __init deferred_init_pages(int nid, int zid,
1556                                                 unsigned long pfn,
1557                                                 unsigned long end_pfn)
1558{
1559        struct mminit_pfnnid_cache nid_init_state = { };
1560        unsigned long nr_pgmask = pageblock_nr_pages - 1;
1561        unsigned long nr_pages = 0;
1562        struct page *page = NULL;
1563
1564        for (; pfn < end_pfn; pfn++) {
1565                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
1566                        page = NULL;
1567                        continue;
1568                } else if (!page || !(pfn & nr_pgmask)) {
1569                        page = pfn_to_page(pfn);
1570                        touch_nmi_watchdog();
1571                } else {
1572                        page++;
1573                }
1574                __init_single_page(page, pfn, zid, nid);
1575                nr_pages++;
1576        }
1577        return (nr_pages);
1578}
1579
1580/* Initialise remaining memory on a node */
1581static int __init deferred_init_memmap(void *data)
1582{
1583        pg_data_t *pgdat = data;
1584        int nid = pgdat->node_id;
1585        unsigned long start = jiffies;
1586        unsigned long nr_pages = 0;
1587        unsigned long spfn, epfn, first_init_pfn, flags;
1588        phys_addr_t spa, epa;
1589        int zid;
1590        struct zone *zone;
1591        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1592        u64 i;
1593
1594        /* Bind memory initialisation thread to a local node if possible */
1595        if (!cpumask_empty(cpumask))
1596                set_cpus_allowed_ptr(current, cpumask);
1597
1598        pgdat_resize_lock(pgdat, &flags);
1599        first_init_pfn = pgdat->first_deferred_pfn;
1600        if (first_init_pfn == ULONG_MAX) {
1601                pgdat_resize_unlock(pgdat, &flags);
1602                pgdat_init_report_one_done();
1603                return 0;
1604        }
1605
1606        /* Sanity check boundaries */
1607        BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1608        BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1609        pgdat->first_deferred_pfn = ULONG_MAX;
1610
1611        /* Only the highest zone is deferred so find it */
1612        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1613                zone = pgdat->node_zones + zid;
1614                if (first_init_pfn < zone_end_pfn(zone))
1615                        break;
1616        }
1617        first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
1618
1619        /*
1620         * Initialize and free pages. We do it in two loops: first we initialize
1621         * struct page, then free to the buddy allocator, because while we are
1622         * freeing pages we can access pages that are ahead (computing buddy
1623         * page in __free_one_page()).
1624         */
1625        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1626                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1627                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1628                nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
1629        }
1630        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1631                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1632                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1633                deferred_free_pages(nid, zid, spfn, epfn);
1634        }
1635        pgdat_resize_unlock(pgdat, &flags);
1636
1637        /* Sanity check that the next zone really is unpopulated */
1638        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1639
1640        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1641                                        jiffies_to_msecs(jiffies - start));
1642
1643        pgdat_init_report_one_done();
1644        return 0;
1645}
1646
1647/*
1648 * If this zone has deferred pages, try to grow it by initializing enough
1649 * deferred pages to satisfy the allocation specified by order, rounded up to
1650 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
1651 * of SECTION_SIZE bytes by initializing struct pages in increments of
1652 * PAGES_PER_SECTION * sizeof(struct page) bytes.
1653 *
1654 * Return true when zone was grown, otherwise return false. We return true even
1655 * when we grow less than requested, to let the caller decide if there are
1656 * enough pages to satisfy the allocation.
1657 *
1658 * Note: We use noinline because this function is needed only during boot, and
1659 * it is called from a __ref function _deferred_grow_zone. This way we are
1660 * making sure that it is not inlined into permanent text section.
1661 */
1662static noinline bool __init
1663deferred_grow_zone(struct zone *zone, unsigned int order)
1664{
1665        int zid = zone_idx(zone);
1666        int nid = zone_to_nid(zone);
1667        pg_data_t *pgdat = NODE_DATA(nid);
1668        unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
1669        unsigned long nr_pages = 0;
1670        unsigned long first_init_pfn, spfn, epfn, t, flags;
1671        unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
1672        phys_addr_t spa, epa;
1673        u64 i;
1674
1675        /* Only the last zone may have deferred pages */
1676        if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
1677                return false;
1678
1679        pgdat_resize_lock(pgdat, &flags);
1680
1681        /*
1682         * If deferred pages have been initialized while we were waiting for
1683         * the lock, return true, as the zone was grown.  The caller will retry
1684         * this zone.  We won't return to this function since the caller also
1685         * has this static branch.
1686         */
1687        if (!static_branch_unlikely(&deferred_pages)) {
1688                pgdat_resize_unlock(pgdat, &flags);
1689                return true;
1690        }
1691
1692        /*
1693         * If someone grew this zone while we were waiting for spinlock, return
1694         * true, as there might be enough pages already.
1695         */
1696        if (first_deferred_pfn != pgdat->first_deferred_pfn) {
1697                pgdat_resize_unlock(pgdat, &flags);
1698                return true;
1699        }
1700
1701        first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn);
1702
1703        if (first_init_pfn >= pgdat_end_pfn(pgdat)) {
1704                pgdat_resize_unlock(pgdat, &flags);
1705                return false;
1706        }
1707
1708        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1709                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1710                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
1711
1712                while (spfn < epfn && nr_pages < nr_pages_needed) {
1713                        t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
1714                        first_deferred_pfn = min(t, epfn);
1715                        nr_pages += deferred_init_pages(nid, zid, spfn,
1716                                                        first_deferred_pfn);
1717                        spfn = first_deferred_pfn;
1718                }
1719
1720                if (nr_pages >= nr_pages_needed)
1721                        break;
1722        }
1723
1724        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
1725                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
1726                epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
1727                deferred_free_pages(nid, zid, spfn, epfn);
1728
1729                if (first_deferred_pfn == epfn)
1730                        break;
1731        }
1732        pgdat->first_deferred_pfn = first_deferred_pfn;
1733        pgdat_resize_unlock(pgdat, &flags);
1734
1735        return nr_pages > 0;
1736}
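/*
 * Example of the growth granularity (section size is architecture-specific;
 * x86-64 with 4KiB pages has 128MiB sections, i.e. PAGES_PER_SECTION == 32768):
 * an order-3 request needs only 8 pages, but nr_pages_needed is rounded up to
 * ALIGN(8, 32768) == 32768, so at least one full section worth of struct
 * pages is initialized before the allocation is retried.
 */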
1737
1738/*
1739 * deferred_grow_zone() is __init, but it is called from
1740 * get_page_from_freelist() during early boot until deferred_pages permanently
1741 * disables this call. This is why we have the __ref wrapper, to avoid a
1742 * section-mismatch warning and to ensure that the function body gets unloaded.
1743 */
1744static bool __ref
1745_deferred_grow_zone(struct zone *zone, unsigned int order)
1746{
1747        return deferred_grow_zone(zone, order);
1748}
1749
1750#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1751
1752void __init page_alloc_init_late(void)
1753{
1754        struct zone *zone;
1755
1756#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1757        int nid;
1758
1759        /* There will be num_node_state(N_MEMORY) threads */
1760        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1761        for_each_node_state(nid, N_MEMORY) {
1762                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1763        }
1764
1765        /* Block until all are initialised */
1766        wait_for_completion(&pgdat_init_all_done_comp);
1767
1768        /*
1769         * We initialized the rest of the deferred pages.  Permanently disable
1770         * on-demand struct page initialization.
1771         */
1772        static_branch_disable(&deferred_pages);
1773
1774        /* Reinit limits that are based on free pages after the kernel is up */
1775        files_maxfiles_init();
1776#endif
1777#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
1778        /* Discard memblock private memory */
1779        memblock_discard();
1780#endif
1781
1782        for_each_populated_zone(zone)
1783                set_zone_contiguous(zone);
1784}
1785
1786#ifdef CONFIG_CMA
1787/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1788void __init init_cma_reserved_pageblock(struct page *page)
1789{
1790        unsigned i = pageblock_nr_pages;
1791        struct page *p = page;
1792
1793        do {
1794                __ClearPageReserved(p);
1795                set_page_count(p, 0);
1796        } while (++p, --i);
1797
1798        set_pageblock_migratetype(page, MIGRATE_CMA);
1799
1800        if (pageblock_order >= MAX_ORDER) {
1801                i = pageblock_nr_pages;
1802                p = page;
1803                do {
1804                        set_page_refcounted(p);
1805                        __free_pages(p, MAX_ORDER - 1);
1806                        p += MAX_ORDER_NR_PAGES;
1807                } while (i -= MAX_ORDER_NR_PAGES);
1808        } else {
1809                set_page_refcounted(page);
1810                __free_pages(page, pageblock_order);
1811        }
1812
1813        adjust_managed_page_count(page, pageblock_nr_pages);
1814}
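/*
 * A sketch of the two cases above, assuming 4KiB pages: with the common
 * pageblock_order == 9 and MAX_ORDER == 11, the whole 512-page block is
 * released with a single __free_pages(page, 9) call.  On configurations where
 * pageblock_order >= MAX_ORDER (very large pageblocks), the block is instead
 * released as consecutive MAX_ORDER-1 chunks of MAX_ORDER_NR_PAGES pages
 * each, since the buddy allocator cannot accept a larger unit.
 */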
1815#endif
1816
1817/*
1818 * The order of subdivision here is critical for the IO subsystem.
1819 * Please do not alter this order without good reasons and regression
1820 * testing. Specifically, as large blocks of memory are subdivided,
1821 * the order in which smaller blocks are delivered depends on the order
1822 * they're subdivided in this function. This is the primary factor
1823 * influencing the order in which pages are delivered to the IO
1824 * subsystem according to empirical testing, and this is also justified
1825 * by considering the behavior of a buddy system containing a single
1826 * large block of memory acted on by a series of small allocations.
1827 * This behavior is a critical factor in sglist merging's success.
1828 *
1829 * -- nyc
1830 */
1831static inline void expand(struct zone *zone, struct page *page,
1832        int low, int high, struct free_area *area,
1833        int migratetype)
1834{
1835        unsigned long size = 1 << high;
1836
1837        while (high > low) {
1838                area--;
1839                high--;
1840                size >>= 1;
1841                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1842
1843                /*
1844                 * Mark as guard pages (or page); this allows them to be
1845                 * merged back into the allocator when the buddy is freed.
1846                 * The corresponding page table entries will not be touched;
1847                 * the pages will remain not present in the virtual address space.
1848                 */
1849                if (set_page_guard(zone, &page[size], high, migratetype))
1850                        continue;
1851
1852                list_add(&page[size].lru, &area->free_list[migratetype]);
1853                area->nr_free++;
1854                set_page_order(&page[size], high);
1855        }
1856}
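/*
 * Worked example: satisfying an order-2 request from an order-5 free block
 * (low == 2, high == 5).  The loop peels off the upper half three times,
 * placing one order-4, one order-3 and one order-2 buddy back on the free
 * lists (at page offsets 16, 8 and 4 respectively), and the caller keeps
 * pages [0..3].  Halves that become debug guard pages are skipped and not
 * put back on a free list.
 */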
1857
1858static void check_new_page_bad(struct page *page)
1859{
1860        const char *bad_reason = NULL;
1861        unsigned long bad_flags = 0;
1862
1863        if (unlikely(atomic_read(&page->_mapcount) != -1))
1864                bad_reason = "nonzero mapcount";
1865        if (unlikely(page->mapping != NULL))
1866                bad_reason = "non-NULL mapping";
1867        if (unlikely(page_ref_count(page) != 0))
1868                bad_reason = "nonzero _count";
1869        if (unlikely(page->flags & __PG_HWPOISON)) {
1870                bad_reason = "HWPoisoned (hardware-corrupted)";
1871                bad_flags = __PG_HWPOISON;
1872                /* Don't complain about hwpoisoned pages */
1873                page_mapcount_reset(page); /* remove PageBuddy */
1874                return;
1875        }
1876        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1877                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1878                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
1879        }
1880#ifdef CONFIG_MEMCG
1881        if (unlikely(page->mem_cgroup))
1882                bad_reason = "page still charged to cgroup";
1883#endif
1884        bad_page(page, bad_reason, bad_flags);
1885}
1886
1887/*
1888 * This page is about to be returned from the page allocator
1889 */
1890static inline int check_new_page(struct page *page)
1891{
1892        if (likely(page_expected_state(page,
1893                                PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1894                return 0;
1895
1896        check_new_page_bad(page);
1897        return 1;
1898}
1899
1900static inline bool free_pages_prezeroed(void)
1901{
1902        return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
1903                page_poisoning_enabled();
1904}
1905
1906#ifdef CONFIG_DEBUG_VM
1907static bool check_pcp_refill(struct page *page)
1908{
1909        return false;
1910}
1911
1912static bool check_new_pcp(struct page *page)
1913{
1914        return check_new_page(page);
1915}
1916#else
1917static bool check_pcp_refill(struct page *page)
1918{
1919        return check_new_page(page);
1920}
1921static bool check_new_pcp(struct page *page)
1922{
1923        return false;
1924}
1925#endif /* CONFIG_DEBUG_VM */
1926
1927static bool check_new_pages(struct page *page, unsigned int order)
1928{
1929        int i;
1930        for (i = 0; i < (1 << order); i++) {
1931                struct page *p = page + i;
1932
1933                if (unlikely(check_new_page(p)))
1934                        return true;
1935        }
1936
1937        return false;
1938}
1939
1940inline void post_alloc_hook(struct page *page, unsigned int order,
1941                                gfp_t gfp_flags)
1942{
1943        set_page_private(page, 0);
1944        set_page_refcounted(page);
1945
1946        arch_alloc_page(page, order);
1947        kernel_map_pages(page, 1 << order, 1);
1948        kernel_poison_pages(page, 1 << order, 1);
1949        kasan_alloc_pages(page, order);
1950        set_page_owner(page, order, gfp_flags);
1951}
1952
1953static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1954                                                        unsigned int alloc_flags)
1955{
1956        int i;
1957
1958        post_alloc_hook(page, order, gfp_flags);
1959
1960        if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
1961                for (i = 0; i < (1 << order); i++)
1962                        clear_highpage(page + i);
1963
1964        if (order && (gfp_flags & __GFP_COMP))
1965                prep_compound_page(page, order);
1966
1967        /*
1968         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1969         * allocate the page. The expectation is that the caller is taking
1970         * steps that will free more memory. The caller should avoid the page
1971         * being used for !PFMEMALLOC purposes.
1972         */
1973        if (alloc_flags & ALLOC_NO_WATERMARKS)
1974                set_page_pfmemalloc(page);
1975        else
1976                clear_page_pfmemalloc(page);
1977}
1978
1979/*
1980 * Go through the free lists for the given migratetype and remove
1981 * the smallest available page from the freelists
1982 */
1983static __always_inline
1984struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1985                                                int migratetype)
1986{
1987        unsigned int current_order;
1988        struct free_area *area;
1989        struct page *page;
1990
1991        /* Find a page of the appropriate size in the preferred list */
1992        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1993                area = &(zone->free_area[current_order]);
1994                page = list_first_entry_or_null(&area->free_list[migratetype],
1995                                                        struct page, lru);
1996                if (!page)
1997                        continue;
1998                list_del(&page->lru);
1999                rmv_page_order(page);
2000                area->nr_free--;
2001                expand(zone, page, order, current_order, area, migratetype);
2002                set_pcppage_migratetype(page, migratetype);
2003                return page;
2004        }
2005
2006        return NULL;
2007}
2008
2009
2010/*
2011 * This array describes the order lists are fallen back to when
2012 * the free lists for the desirable migrate type are depleted
2013 */
2014static int fallbacks[MIGRATE_TYPES][4] = {
2015        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2016        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2017        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2018#ifdef CONFIG_CMA
2019        [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2020#endif
2021#ifdef CONFIG_MEMORY_ISOLATION
2022        [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2023#endif
2024};
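/*
 * Example of how this table is consulted: a MIGRATE_UNMOVABLE request whose
 * own free lists are empty first tries MIGRATE_RECLAIMABLE, then
 * MIGRATE_MOVABLE; the MIGRATE_TYPES entry is a sentinel that terminates the
 * walk in find_suitable_fallback().  MIGRATE_CMA and MIGRATE_ISOLATE are
 * never used as a starting type here, hence their sentinel-only rows.
 */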
2025
2026#ifdef CONFIG_CMA
2027static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2028                                        unsigned int order)
2029{
2030        return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2031}
2032#else
2033static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2034                                        unsigned int order) { return NULL; }
2035#endif
2036
2037/*
2038 * Move the free pages in a range to the free lists of the requested type.
2039 * Note that start_page and end_page are not necessarily aligned on a pageblock
2040 * boundary. If alignment is required, use move_freepages_block().
2041 */
2042static int move_freepages(struct zone *zone,
2043                          struct page *start_page, struct page *end_page,
2044                          int migratetype, int *num_movable)
2045{
2046        struct page *page;
2047        unsigned int order;
2048        int pages_moved = 0;
2049
2050#ifndef CONFIG_HOLES_IN_ZONE
2051        /*
2052         * page_zone is not safe to call in this context when
2053         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2054         * anyway as we check zone boundaries in move_freepages_block().
2055         * Remove at a later date when no bug reports exist related to
2056         * grouping pages by mobility
2057         */
2058        VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2059                  pfn_valid(page_to_pfn(end_page)) &&
2060                  page_zone(start_page) != page_zone(end_page));
2061#endif
2062        for (page = start_page; page <= end_page;) {
2063                if (!pfn_valid_within(page_to_pfn(page))) {
2064                        page++;
2065                        continue;
2066                }
2067
2068                /* Make sure we are not inadvertently changing nodes */
2069                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2070
2071                if (!PageBuddy(page)) {
2072                        /*
2073                         * We assume that pages that could be isolated for
2074                         * migration are movable. But we don't actually try
2075                         * isolating, as that would be expensive.
2076                         */
2077                        if (num_movable &&
2078                                        (PageLRU(page) || __PageMovable(page)))
2079                                (*num_movable)++;
2080
2081                        page++;
2082                        continue;
2083                }
2084
2085                order = page_order(page);
2086                list_move(&page->lru,
2087                          &zone->free_area[order].free_list[migratetype]);
2088                page += 1 << order;
2089                pages_moved += 1 << order;
2090        }
2091
2092        return pages_moved;
2093}
2094
2095int move_freepages_block(struct zone *zone, struct page *page,
2096                                int migratetype, int *num_movable)
2097{
2098        unsigned long start_pfn, end_pfn;
2099        struct page *start_page, *end_page;
2100
2101        if (num_movable)
2102                *num_movable = 0;
2103
2104        start_pfn = page_to_pfn(page);
2105        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
2106        start_page = pfn_to_page(start_pfn);
2107        end_page = start_page + pageblock_nr_pages - 1;
2108        end_pfn = start_pfn + pageblock_nr_pages - 1;
2109
2110        /* Do not cross zone boundaries */
2111        if (!zone_spans_pfn(zone, start_pfn))
2112                start_page = page;
2113        if (!zone_spans_pfn(zone, end_pfn))
2114                return 0;
2115
2116        return move_freepages(zone, start_page, end_page, migratetype,
2117                                                                num_movable);
2118}
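/*
 * Alignment example (assuming pageblock_nr_pages == 512, i.e. 0x200): for a
 * page at pfn 0x12345, start_pfn is masked down to 0x12200 and end_pfn is
 * 0x123ff, so the whole surrounding pageblock is moved.  If the block start
 * falls outside the zone, the move starts at the original page instead; if
 * the block end falls outside the zone, nothing is moved at all, as above.
 */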
2119
2120static void change_pageblock_range(struct page *pageblock_page,
2121                                        int start_order, int migratetype)
2122{
2123        int nr_pageblocks = 1 << (start_order - pageblock_order);
2124
2125        while (nr_pageblocks--) {
2126                set_pageblock_migratetype(pageblock_page, migratetype);
2127                pageblock_page += pageblock_nr_pages;
2128        }
2129}
2130
2131/*
2132 * When we are falling back to another migratetype during allocation, try to
2133 * steal extra free pages from the same pageblocks to satisfy further
2134 * allocations, instead of polluting multiple pageblocks.
2135 *
2136 * If we are stealing a relatively large buddy page, it is likely there will
2137 * be more free pages in the pageblock, so try to steal them all. For
2138 * reclaimable and unmovable allocations, we steal regardless of page size,
2139 * as fragmentation caused by those allocations polluting movable pageblocks
2140 * is worse than movable allocations stealing from unmovable and reclaimable
2141 * pageblocks.
2142 */
2143static bool can_steal_fallback(unsigned int order, int start_mt)
2144{
2145        /*
2146         * This order check is intentionally kept even though a more
2147         * relaxed check follows below. The reason is that we can steal
2148         * the whole pageblock when this condition is met, whereas the
2149         * check below does not guarantee that; it is only a heuristic
2150         * and may be changed at any time.
2151         */
2152        if (order >= pageblock_order)
2153                return true;
2154
2155        if (order >= pageblock_order / 2 ||
2156                start_mt == MIGRATE_RECLAIMABLE ||
2157                start_mt == MIGRATE_UNMOVABLE ||
2158                page_group_by_mobility_disabled)
2159                return true;
2160
2161        return false;
2162}
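/*
 * Concrete reading of the checks above, assuming pageblock_order == 9:
 * an order >= 9 fallback always steals the whole pageblock; otherwise
 * MIGRATE_RECLAIMABLE and MIGRATE_UNMOVABLE requests may steal regardless
 * of size, while a MIGRATE_MOVABLE request may steal only when order >= 4
 * (pageblock_order / 2), unless page grouping by mobility is disabled.
 */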
2163
2164static inline void boost_watermark(struct zone *zone)
2165{
2166        unsigned long max_boost;
2167
2168        if (!watermark_boost_factor)
2169                return;
2170
2171        max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2172                        watermark_boost_factor, 10000);
2173
2174        /*
2175         * The high watermark may be uninitialised if fragmentation occurs
2176         * very early in boot, so do not boost. We do not fall
2177         * through and boost by pageblock_nr_pages because failing
2178         * allocations that early means that reclaim is not going
2179         * to help, and it may even be impossible to reclaim the
2180         * boosted watermark, resulting in a hang.
2181         */
2182        if (!max_boost)
2183                return;
2184
2185        max_boost = max(pageblock_nr_pages, max_boost);
2186
2187        zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2188                max_boost);
2189}
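/*
 * Example of the boost arithmetic: with the default watermark_boost_factor
 * of 15000, max_boost works out to 150% of the zone's high watermark (but at
 * least one pageblock).  Each fallback event then raises watermark_boost by
 * pageblock_nr_pages, capped at that maximum; the boost is temporary and is
 * removed again by kswapd once it has reclaimed above the boosted level.
 */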
2190
2191/*
2192 * This function implements actual steal behaviour. If order is large enough,
2193 * we can steal whole pageblock. If not, we first move freepages in this
2194 * pageblock to our migratetype and determine how many already-allocated pages
2195 * are there in the pageblock with a compatible migratetype. If at least half
2196 * of pages are free or compatible, we can change migratetype of the pageblock
2197 * itself, so pages freed in the future will be put on the correct free list.
2198 */
2199static void steal_suitable_fallback(struct zone *zone, struct page *page,
2200                unsigned int alloc_flags, int start_type, bool whole_block)
2201{
2202        unsigned int current_order = page_order(page);
2203        struct free_area *area;
2204        int free_pages, movable_pages, alike_pages;
2205        int old_block_type;
2206
2207        old_block_type = get_pageblock_migratetype(page);
2208
2209        /*
2210         * This can happen due to races and we want to prevent broken
2211         * highatomic accounting.
2212         */
2213        if (is_migrate_highatomic(old_block_type))
2214                goto single_page;
2215
2216        /* Take ownership for orders >= pageblock_order */
2217        if (current_order >= pageblock_order) {
2218                change_pageblock_range(page, current_order, start_type);
2219                goto single_page;
2220        }
2221
2222        /*
2223         * Boost watermarks to increase reclaim pressure to reduce the
2224         * likelihood of future fallbacks. Wake kswapd now as the node
2225         * may be balanced overall and kswapd will not wake naturally.
2226         */
2227        boost_watermark(zone);
2228        if (alloc_flags & ALLOC_KSWAPD)
2229                set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2230
2231        /* We are not allowed to try stealing from the whole block */
2232        if (!whole_block)
2233                goto single_page;
2234
2235        free_pages = move_freepages_block(zone, page, start_type,
2236                                                &movable_pages);
2237        /*
2238         * Determine how many pages are compatible with our allocation.
2239         * For movable allocation, it's the number of movable pages which
2240         * we just obtained. For other types it's a bit more tricky.
2241         */
2242        if (start_type == MIGRATE_MOVABLE) {
2243                alike_pages = movable_pages;
2244        } else {
2245                /*
2246                 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2247                 * to MOVABLE pageblock, consider all non-movable pages as
2248                 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2249                 * vice versa, be conservative since we can't distinguish the
2250                 * exact migratetype of non-movable pages.
2251                 */
2252                if (old_block_type == MIGRATE_MOVABLE)
2253                        alike_pages = pageblock_nr_pages
2254                                                - (free_pages + movable_pages);
2255                else
2256                        alike_pages = 0;
2257        }
2258
2259        /* moving whole block can fail due to zone boundary conditions */
2260        if (!free_pages)
2261                goto single_page;
2262
2263        /*
2264         * If a sufficient number of pages in the block are either free or of
2265         * comparable migratability as our allocation, claim the whole block.
2266         */
2267        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2268                        page_group_by_mobility_disabled)
2269                set_pageblock_migratetype(page, start_type);
2270
2271        return;
2272
2273single_page:
2274        area = &zone->free_area[current_order];
2275        list_move(&page->lru, &area->free_list[start_type]);
2276}
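/*
 * Threshold example (assuming pageblock_order == 9, i.e. 512 pages per
 * block): after moving the free pages, the block's migratetype is changed to
 * start_type only if free_pages + alike_pages >= 256, i.e. at least half of
 * the pageblock is free or already compatible with the requested type.
 */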
2277
2278/*
2279 * Check whether there is a suitable fallback freepage with requested order.
2280 * If only_stealable is true, this function returns fallback_mt only if
2281 * we can steal other freepages all together. This would help to reduce
2282 * fragmentation due to mixed migratetype pages in one pageblock.
2283 */
2284int find_suitable_fallback(struct free_area *area, unsigned int order,
2285                        int migratetype, bool only_stealable, bool *can_steal)
2286{
2287        int i;
2288        int fallback_mt;
2289
2290        if (area->nr_free == 0)
2291                return -1;
2292
2293        *can_steal = false;
2294        for (i = 0;; i++) {
2295                fallback_mt = fallbacks[migratetype][i];
2296                if (fallback_mt == MIGRATE_TYPES)
2297                        break;
2298
2299                if (list_empty(&area->free_list[fallback_mt]))
2300                        continue;
2301
2302                if (can_steal_fallback(order, migratetype))
2303                        *can_steal = true;
2304
2305                if (!only_stealable)
2306                        return fallback_mt;
2307
2308                if (*can_steal)
2309                        return fallback_mt;
2310        }
2311
2312        return -1;
2313}
2314
2315/*
2316 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2317 * there are no empty page blocks that contain a page with a suitable order
2318 */
2319static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2320                                unsigned int alloc_order)
2321{
2322        int mt;
2323        unsigned long max_managed, flags;
2324
2325        /*
2326         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2327         * Check is race-prone but harmless.
2328         */
2329        max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2330        if (zone->nr_reserved_highatomic >= max_managed)
2331                return;
2332
2333        spin_lock_irqsave(&zone->lock, flags);
2334
2335        /* Recheck the nr_reserved_highatomic limit under the lock */
2336        if (zone->nr_reserved_highatomic >= max_managed)
2337                goto out_unlock;
2338
2339        /* Yoink! */
2340        mt = get_pageblock_migratetype(page);
2341        if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2342            && !is_migrate_cma(mt)) {
2343                zone->nr_reserved_highatomic += pageblock_nr_pages;
2344                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2345                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2346        }
2347
2348out_unlock:
2349        spin_unlock_irqrestore(&zone->lock, flags);
2350}
2351
2352/*
2353 * Used when an allocation is about to fail under memory pressure. This
2354 * potentially hurts the reliability of high-order allocations when under
2355 * intense memory pressure but failed atomic allocations should be easier
2356 * to recover from than an OOM.
2357 *
2358 * If @force is true, try to unreserve a pageblock even though highatomic
2359 * pageblock is exhausted.
2360 */
2361static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2362                                                bool force)
2363{
2364        struct zonelist *zonelist = ac->zonelist;
2365        unsigned long flags;
2366        struct zoneref *z;
2367        struct zone *zone;
2368        struct page *page;
2369        int order;
2370        bool ret;
2371
2372        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
2373                                                                ac->nodemask) {
2374                /*
2375                 * Preserve at least one pageblock unless memory pressure
2376                 * is really high.
2377                 */
2378                if (!force && zone->nr_reserved_highatomic <=
2379                                        pageblock_nr_pages)
2380                        continue;
2381
2382                spin_lock_irqsave(&zone->lock, flags);
2383                for (order = 0; order < MAX_ORDER; order++) {
2384                        struct free_area *area = &(zone->free_area[order]);
2385
2386                        page = list_first_entry_or_null(
2387                                        &area->free_list[MIGRATE_HIGHATOMIC],
2388                                        struct page, lru);
2389                        if (!page)
2390                                continue;
2391
2392                        /*
2393                         * In the page freeing path, migratetype changes are racy,
2394                         * so we can encounter several free pages in a pageblock
2395                         * in this loop although we changed the pageblock type
2396                         * from highatomic to ac->migratetype. So we should
2397                         * adjust the count once.
2398                         */
2399                        if (is_migrate_highatomic_page(page)) {
2400                                /*
2401                                 * It should never happen but changes to
2402                                 * locking could inadvertently allow a per-cpu
2403                                 * drain to add pages to MIGRATE_HIGHATOMIC
2404                                 * while unreserving so be safe and watch for
2405                                 * underflows.
2406                                 */
2407                                zone->nr_reserved_highatomic -= min(
2408                                                pageblock_nr_pages,
2409                                                zone->nr_reserved_highatomic);
2410                        }
2411
2412                        /*
2413                         * Convert to ac->migratetype and avoid the normal
2414                         * pageblock stealing heuristics. Minimally, the caller
2415                         * is doing the work and needs the pages. More
2416                         * importantly, if the block was always converted to
2417                         * MIGRATE_UNMOVABLE or another type then the number
2418                         * of pageblocks that cannot be completely freed
2419                         * may increase.
2420                         */
2421                        set_pageblock_migratetype(page, ac->migratetype);
2422                        ret = move_freepages_block(zone, page, ac->migratetype,
2423                                                                        NULL);
2424                        if (ret) {
2425                                spin_unlock_irqrestore(&zone->lock, flags);
2426                                return ret;
2427                        }
2428                }
2429                spin_unlock_irqrestore(&zone->lock, flags);
2430        }
2431
2432        return false;
2433}
2434
2435/*
2436 * Try finding a free buddy page on the fallback list and put it on the free
2437 * list of requested migratetype, possibly along with other pages from the same
2438 * block, depending on fragmentation avoidance heuristics. Returns true if
2439 * fallback was found so that __rmqueue_smallest() can grab it.
2440 *
2441 * The use of signed ints for order and current_order is a deliberate
2442 * deviation from the rest of this file, to make the for loop
2443 * condition simpler.
2444 */
2445static __always_inline bool
2446__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2447                                                unsigned int alloc_flags)
2448{
2449        struct free_area *area;
2450        int current_order;
2451        int min_order = order;
2452        struct page *page;
2453        int fallback_mt;
2454        bool can_steal;
2455
2456        /*
2457         * Do not steal pages from freelists belonging to other pageblocks
2458         * i.e. orders < pageblock_order. If there are no local zones free,
2459         * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2460         */
2461        if (alloc_flags & ALLOC_NOFRAGMENT)
2462                min_order = pageblock_order;
2463
2464        /*
2465         * Find the largest available free page in the other list. This roughly
2466         * approximates finding the pageblock with the most free pages, which
2467         * would be too costly to do exactly.
2468         */
2469        for (current_order = MAX_ORDER - 1; current_order >= min_order;
2470                                --current_order) {
2471                area = &(zone->free_area[current_order]);
2472                fallback_mt = find_suitable_fallback(area, current_order,
2473                                start_migratetype, false, &can_steal);
2474                if (fallback_mt == -1)
2475                        continue;
2476
2477                /*
2478                 * We cannot steal all free pages from the pageblock and the
2479                 * requested migratetype is movable. In that case it's better to
2480                 * steal and split the smallest available page instead of the
2481                 * largest available page, because even if the next movable
2482                 * allocation falls back into a different pageblock than this
2483                 * one, it won't cause permanent fragmentation.
2484                 */
2485                if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2486                                        && current_order > order)
2487                        goto find_smallest;
2488
2489                goto do_steal;
2490        }
2491
2492        return false;
2493
2494find_smallest:
2495        for (current_order = order; current_order < MAX_ORDER;
2496                                                        current_order++) {
2497                area = &(zone->free_area[current_order]);
2498                fallback_mt = find_suitable_fallback(area, current_order,
2499                                start_migratetype, false, &can_steal);
2500                if (fallback_mt != -1)
2501                        break;
2502        }
2503
2504        /*
2505         * This should not happen - we already found a suitable fallback
2506         * when looking for the largest page.
2507         */
2508        VM_BUG_ON(current_order == MAX_ORDER);
2509
2510do_steal:
2511        page = list_first_entry(&area->free_list[fallback_mt],
2512                                                        struct page, lru);
2513
2514        steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2515                                                                can_steal);
2516
2517        trace_mm_page_alloc_extfrag(page, order, current_order,
2518                start_migratetype, fallback_mt);
2519
2520        return true;
2521
2522}
2523
2524/*
2525 * Do the hard work of removing an element from the buddy allocator.
2526 * Call me with the zone->lock already held.
2527 */
2528static __always_inline struct page *
2529__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2530                                                unsigned int alloc_flags)
2531{
2532        struct page *page;
2533
2534retry:
2535        page = __rmqueue_smallest(zone, order, migratetype);
2536        if (unlikely(!page)) {
2537                if (migratetype == MIGRATE_MOVABLE)
2538                        page = __rmqueue_cma_fallback(zone, order);
2539
2540                if (!page && __rmqueue_fallback(zone, order, migratetype,
2541                                                                alloc_flags))
2542                        goto retry;
2543        }
2544
2545        trace_mm_page_alloc_zone_locked(page, order, migratetype);
2546        return page;
2547}
2548
2549/*
2550 * Obtain a specified number of elements from the buddy allocator, all under
2551 * a single hold of the lock, for efficiency.  Add them to the supplied list.
2552 * Returns the number of new pages which were placed at *list.
2553 */
2554static int rmqueue_bulk(struct zone *zone, unsigned int order,
2555                        unsigned long count, struct list_head *list,
2556                        int migratetype, unsigned int alloc_flags)
2557{
2558        int i, alloced = 0;
2559
2560        spin_lock(&zone->lock);
2561        for (i = 0; i < count; ++i) {
2562                struct page *page = __rmqueue(zone, order, migratetype,
2563                                                                alloc_flags);
2564                if (unlikely(page == NULL))
2565                        break;
2566
2567                if (unlikely(check_pcp_refill(page)))
2568                        continue;
2569
2570                /*
2571                 * Split buddy pages returned by expand() are received here in
2572                 * physical page order. The page is added to the tail of the
2573                 * caller's list, so from the caller's perspective the linked
2574                 * list is ordered by page number under some conditions. This
2575                 * is useful for IO devices that can forward from the head of
2576                 * the list, and thus also walk pages in physical order, and
2577                 * for IO devices that can merge IO requests if the physical
2578                 * pages are ordered properly.
2579                 */
2580                list_add_tail(&page->lru, list);
2581                alloced++;
2582                if (is_migrate_cma(get_pcppage_migratetype(page)))
2583                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2584                                              -(1 << order));
2585        }
2586
2587        /*
2588         * i pages were removed from the buddy list even if some leak due
2589         * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2590         * on i. Do not confuse with 'alloced' which is the number of
2591         * pages added to the pcp list.
2592         */
2593        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2594        spin_unlock(&zone->lock);
2595        return alloced;
2596}
2597
2598#ifdef CONFIG_NUMA
2599/*
2600 * Called from the vmstat counter updater to drain pagesets of this
2601 * currently executing processor on remote nodes after they have
2602 * expired.
2603 *
2604 * Note that this function must be called with the thread pinned to
2605 * a single processor.
2606 */
2607void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2608{
2609        unsigned long flags;
2610        int to_drain, batch;
2611
2612        local_irq_save(flags);
2613        batch = READ_ONCE(pcp->batch);
2614        to_drain = min(pcp->count, batch);
2615        if (to_drain > 0)
2616                free_pcppages_bulk(zone, to_drain, pcp);
2617        local_irq_restore(flags);
2618}
2619#endif
2620
2621/*
2622 * Drain pcplists of the indicated processor and zone.
2623 *
2624 * The processor must either be the current processor and the
2625 * thread pinned to the current processor or a processor that
2626 * is not online.
2627 */
2628static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2629{
2630        unsigned long flags;
2631        struct per_cpu_pageset *pset;
2632        struct per_cpu_pages *pcp;
2633
2634        local_irq_save(flags);
2635        pset = per_cpu_ptr(zone->pageset, cpu);
2636
2637        pcp = &pset->pcp;
2638        if (pcp->count)
2639                free_pcppages_bulk(zone, pcp->count, pcp);
2640        local_irq_restore(flags);
2641}
2642
2643/*
2644 * Drain pcplists of all zones on the indicated processor.
2645 *
2646 * The processor must either be the current processor and the
2647 * thread pinned to the current processor or a processor that
2648 * is not online.
2649 */
2650static void drain_pages(unsigned int cpu)
2651{
2652        struct zone *zone;
2653
2654        for_each_populated_zone(zone) {
2655                drain_pages_zone(cpu, zone);
2656        }
2657}
2658
2659/*
2660 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2661 *
2662 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
2663 * the single zone's pages.
2664 */
2665void drain_local_pages(struct zone *zone)
2666{
2667        int cpu = smp_processor_id();
2668
2669        if (zone)
2670                drain_pages_zone(cpu, zone);
2671        else
2672                drain_pages(cpu);
2673}
2674
2675static void drain_local_pages_wq(struct work_struct *work)
2676{
2677        struct pcpu_drain *drain;
2678
2679        drain = container_of(work, struct pcpu_drain, work);
2680
2681        /*
2682         * drain_all_pages doesn't use proper cpu hotplug protection, so
2683         * we can race with cpu offline when the WQ can move this from
2684         * a cpu-pinned worker to an unbound one. We can end up operating
2685         * on a different cpu, which is all right, but we also have to make
2686         * sure not to migrate to yet another cpu while draining.
2687         */
2688        preempt_disable();
2689        drain_local_pages(drain->zone);
2690        preempt_enable();
2691}
2692
2693/*
2694 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2695 *
2696 * When zone parameter is non-NULL, spill just the single zone's pages.
2697 *
2698 * Note that this can be extremely slow as the draining happens in a workqueue.
2699 */
2700void drain_all_pages(struct zone *zone)
2701{
2702        int cpu;
2703
2704        /*
2705         * Allocate in the BSS so we won't require allocation in the
2706         * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2707         */
2708        static cpumask_t cpus_with_pcps;
2709
2710        /*
2711         * Make sure nobody triggers this path before mm_percpu_wq is fully
2712         * initialized.
2713         */
2714        if (WARN_ON_ONCE(!mm_percpu_wq))
2715                return;
2716
2717        /*
2718         * Do not drain if one is already in progress unless it's specific to
2719         * a zone. Such callers are primarily CMA and memory hotplug and need
2720         * the drain to be complete when the call returns.
2721         */
2722        if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2723                if (!zone)
2724                        return;
2725                mutex_lock(&pcpu_drain_mutex);
2726        }
2727
2728        /*
2729         * We don't care about racing with a CPU hotplug event,
2730         * as the offline notification will cause the notified
2731         * cpu to drain its own pcps, and on_each_cpu_mask
2732         * disables preemption as part of its processing.
2733         */
2734        for_each_online_cpu(cpu) {
2735                struct per_cpu_pageset *pcp;
2736                struct zone *z;
2737                bool has_pcps = false;
2738
2739                if (zone) {
2740                        pcp = per_cpu_ptr(zone->pageset, cpu);
2741                        if (pcp->pcp.count)
2742                                has_pcps = true;
2743                } else {
2744                        for_each_populated_zone(z) {
2745                                pcp = per_cpu_ptr(z->pageset, cpu);
2746                                if (pcp->pcp.count) {
2747                                        has_pcps = true;
2748                                        break;
2749                                }
2750                        }
2751                }
2752
2753                if (has_pcps)
2754                        cpumask_set_cpu(cpu, &cpus_with_pcps);
2755                else
2756                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
2757        }
2758
2759        for_each_cpu(cpu, &cpus_with_pcps) {
2760                struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
2761
2762                drain->zone = zone;
2763                INIT_WORK(&drain->work, drain_local_pages_wq);
2764                queue_work_on(cpu, mm_percpu_wq, &drain->work);
2765        }
2766        for_each_cpu(cpu, &cpus_with_pcps)
2767                flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
2768
2769        mutex_unlock(&pcpu_drain_mutex);
2770}
2771
2772#ifdef CONFIG_HIBERNATION
2773
2774/*
2775 * Touch the watchdog for every WD_PAGE_COUNT pages.
2776 */
2777#define WD_PAGE_COUNT   (128*1024)
2778
2779void mark_free_pages(struct zone *zone)
2780{
2781        unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
2782        unsigned long flags;
2783        unsigned int order, t;
2784        struct page *page;
2785
2786        if (zone_is_empty(zone))
2787                return;
2788
2789        spin_lock_irqsave(&zone->lock, flags);
2790
2791        max_zone_pfn = zone_end_pfn(zone);
2792        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2793                if (pfn_valid(pfn)) {
2794                        page = pfn_to_page(pfn);
2795
2796                        if (!--page_count) {
2797                                touch_nmi_watchdog();
2798                                page_count = WD_PAGE_COUNT;
2799                        }
2800
2801                        if (page_zone(page) != zone)
2802                                continue;
2803
2804                        if (!swsusp_page_is_forbidden(page))
2805                                swsusp_unset_page_free(page);
2806                }
2807
2808        for_each_migratetype_order(order, t) {
2809                list_for_each_entry(page,
2810                                &zone->free_area[order].free_list[t], lru) {
2811                        unsigned long i;
2812
2813                        pfn = page_to_pfn(page);
2814                        for (i = 0; i < (1UL << order); i++) {
2815                                if (!--page_count) {
2816                                        touch_nmi_watchdog();
2817                                        page_count = WD_PAGE_COUNT;
2818                                }
2819                                swsusp_set_page_free(pfn_to_page(pfn + i));
2820                        }
2821                }
2822        }
2823        spin_unlock_irqrestore(&zone->lock, flags);
2824}
2825#endif /* CONFIG_PM */
2826
2827static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
2828{
2829        int migratetype;
2830
2831        if (!free_pcp_prepare(page))
2832                return false;
2833
2834        migratetype = get_pfnblock_migratetype(page, pfn);
2835        set_pcppage_migratetype(page, migratetype);
2836        return true;
2837}
2838
2839static void free_unref_page_commit(struct page *page, unsigned long pfn)
2840{
2841        struct zone *zone = page_zone(page);
2842        struct per_cpu_pages *pcp;
2843        int migratetype;
2844
2845        migratetype = get_pcppage_migratetype(page);
2846        __count_vm_event(PGFREE);
2847
2848        /*
2849         * We only track unmovable, reclaimable and movable on pcp lists.
2850         * Free ISOLATE pages back to the allocator because they are being
2851         * offlined but treat HIGHATOMIC as movable pages so we can get those
2852         * areas back if necessary. Otherwise, we may have to free
2853         * excessively into the page allocator
2854         */
2855        if (migratetype >= MIGRATE_PCPTYPES) {
2856                if (unlikely(is_migrate_isolate(migratetype))) {
2857                        free_one_page(zone, page, pfn, 0, migratetype);
2858                        return;
2859                }
2860                migratetype = MIGRATE_MOVABLE;
2861        }
2862
2863        pcp = &this_cpu_ptr(zone->pageset)->pcp;
2864        list_add(&page->lru, &pcp->lists[migratetype]);
2865        pcp->count++;
2866        if (pcp->count >= pcp->high) {
2867                unsigned long batch = READ_ONCE(pcp->batch);
2868                free_pcppages_bulk(zone, batch, pcp);
2869        }
2870}
2871
2872/*
2873 * Free a 0-order page
2874 */
2875void free_unref_page(struct page *page)
2876{
2877        unsigned long flags;
2878        unsigned long pfn = page_to_pfn(page);
2879
2880        if (!free_unref_page_prepare(page, pfn))
2881                return;
2882
2883        local_irq_save(flags);
2884        free_unref_page_commit(page, pfn);
2885        local_irq_restore(flags);
2886}
2887
2888/*
2889 * Free a list of 0-order pages
2890 */
2891void free_unref_page_list(struct list_head *list)
2892{
2893        struct page *page, *next;
2894        unsigned long flags, pfn;
2895        int batch_count = 0;
2896
2897        /* Prepare pages for freeing */
2898        list_for_each_entry_safe(page, next, list, lru) {
2899                pfn = page_to_pfn(page);
2900                if (!free_unref_page_prepare(page, pfn))
2901                        list_del(&page->lru);
2902                set_page_private(page, pfn);
2903        }
2904
2905        local_irq_save(flags);
2906        list_for_each_entry_safe(page, next, list, lru) {
2907                unsigned long pfn = page_private(page);
2908
2909                set_page_private(page, 0);
2910                trace_mm_page_free_batched(page);
2911                free_unref_page_commit(page, pfn);
2912
2913                /*
2914                 * Guard against excessive IRQ disabled times when we get
2915                 * a large list of pages to free.
2916                 */
2917                if (++batch_count == SWAP_CLUSTER_MAX) {
2918                        local_irq_restore(flags);
2919                        batch_count = 0;
2920                        local_irq_save(flags);
2921                }
2922        }
2923        local_irq_restore(flags);
2924}
2925
2926/*
2927 * split_page takes a non-compound higher-order page, and splits it into
2928 * n (1<<order) sub-pages: page[0..n-1]
2929 * Each sub-page must be freed individually.
2930 *
2931 * Note: this is probably too low level an operation for use in drivers.
2932 * Please consult with lkml before using this in your driver.
2933 */
2934void split_page(struct page *page, unsigned int order)
2935{
2936        int i;
2937
2938        VM_BUG_ON_PAGE(PageCompound(page), page);
2939        VM_BUG_ON_PAGE(!page_count(page), page);
2940
2941        for (i = 1; i < (1 << order); i++)
2942                set_page_refcounted(page + i);
2943        split_page_owner(page, order);
2944}
2945EXPORT_SYMBOL_GPL(split_page);
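/*
 * Illustrative use (a sketch, not taken from an in-tree caller): a driver
 * that wants four independently refcounted order-0 pages from one physically
 * contiguous allocation could allocate a non-compound order-2 page with
 * alloc_pages(GFP_KERNEL, 2), call split_page(page, 2), and later release
 * page, page + 1, page + 2 and page + 3 individually with __free_page().
 */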
2946
2947int __isolate_free_page(struct page *page, unsigned int order)
2948{
2949        unsigned long watermark;
2950        struct zone *zone;
2951        int mt;
2952
2953        BUG_ON(!PageBuddy(page));
2954
2955        zone = page_zone(page);
2956        mt = get_pageblock_migratetype(page);
2957
2958        if (!is_migrate_isolate(mt)) {
2959                /*
2960                 * Obey watermarks as if the page was being allocated. We can
2961                 * emulate a high-order watermark check with a raised order-0
2962                 * watermark, because we already know our high-order page
2963                 * exists.
2964                 */
2965                watermark = min_wmark_pages(zone) + (1UL << order);
2966                if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2967                        return 0;
2968
2969                __mod_zone_freepage_state(zone, -(1UL << order), mt);
2970        }
2971
2972        /* Remove page from free list */
2973        list_del(&page->lru);
2974        zone->free_area[order].nr_free--;
2975        rmv_page_order(page);
2976
2977        /*
2978         * Set the pageblock if the isolated page is at least half of a
2979         * pageblock
2980         */
2981        if (order >= pageblock_order - 1) {
2982                struct page *endpage = page + (1 << order) - 1;
2983                for (; page < endpage; page += pageblock_nr_pages) {
2984                        int mt = get_pageblock_migratetype(page);
2985                        if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
2986                            && !is_migrate_highatomic(mt))
2987                                set_pageblock_migratetype(page,
2988                                                          MIGRATE_MOVABLE);
2989                }
2990        }
2991
2992
2993        return 1UL << order;
2994}
2995
2996/*
2997 * Update NUMA hit/miss statistics
2998 *
2999 * Must be called with interrupts disabled.
3000 */
3001static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3002{
3003#ifdef CONFIG_NUMA
3004        enum numa_stat_item local_stat = NUMA_LOCAL;
3005
3006        /* skip numa counters update if numa stats is disabled */
3007        if (!static_branch_likely(&vm_numa_stat_key))
3008                return;
3009
3010        if (zone_to_nid(z) != numa_node_id())
3011                local_stat = NUMA_OTHER;
3012
3013        if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3014                __inc_numa_state(z, NUMA_HIT);
3015        else {
3016                __inc_numa_state(z, NUMA_MISS);
3017                __inc_numa_state(preferred_zone, NUMA_FOREIGN);
3018        }
3019        __inc_numa_state(z, local_stat);
3020#endif
3021}
3022
3023/* Remove page from the per-cpu list, caller must protect the list */
3024static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3025                        unsigned int alloc_flags,
3026                        struct per_cpu_pages *pcp,
3027                        struct list_head *list)
3028{
3029        struct page *page;
3030
3031        do {
3032                if (list_empty(list)) {
3033                        pcp->count += rmqueue_bulk(zone, 0,
3034                                        pcp->batch, list,
3035                                        migratetype, alloc_flags);
3036                        if (unlikely(list_empty(list)))
3037                                return NULL;
3038                }
3039
3040                page = list_first_entry(list, struct page, lru);
3041                list_del(&page->lru);
3042                pcp->count--;
3043        } while (check_new_pcp(page));
3044
3045        return page;
3046}
3047
3048/* Lock and remove page from the per-cpu list */
3049static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3050                        struct zone *zone, unsigned int order,
3051                        gfp_t gfp_flags, int migratetype,
3052                        unsigned int alloc_flags)
3053{
3054        struct per_cpu_pages *pcp;
3055        struct list_head *list;
3056        struct page *page;
3057        unsigned long flags;
3058
3059        local_irq_save(flags);
3060        pcp = &this_cpu_ptr(zone->pageset)->pcp;
3061        list = &pcp->lists[migratetype];
3062        page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3063        if (page) {
3064                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3065                zone_statistics(preferred_zone, zone);
3066        }
3067        local_irq_restore(flags);
3068        return page;
3069}
3070
3071/*
3072 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3073 */
3074static inline
3075struct page *rmqueue(struct zone *preferred_zone,
3076                        struct zone *zone, unsigned int order,
3077                        gfp_t gfp_flags, unsigned int alloc_flags,
3078                        int migratetype)
3079{
3080        unsigned long flags;
3081        struct page *page;
3082
3083        if (likely(order == 0)) {
3084                page = rmqueue_pcplist(preferred_zone, zone, order,
3085                                gfp_flags, migratetype, alloc_flags);
3086                goto out;
3087        }
3088
3089        /*
3090         * We most definitely don't want callers attempting to
3091         * allocate greater than order-1 page units with __GFP_NOFAIL.
3092         */
3093        WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3094        spin_lock_irqsave(&zone->lock, flags);
3095
3096        do {
3097                page = NULL;
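                    /*
                     * ALLOC_HARDER requests may take pages from the
                     * high-order atomic reserves before trying __rmqueue().
                     */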
3098                if (alloc_flags & ALLOC_HARDER) {
3099                        page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3100                        if (page)
3101                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
3102                }
3103                if (!page)
3104                        page = __rmqueue(zone, order, migratetype, alloc_flags);
3105        } while (page && check_new_pages(page, order));
3106        spin_unlock(&zone->lock);
3107        if (!page)
3108                goto failed;
3109        __mod_zone_freepage_state(zone, -(1 << order),
3110                                  get_pcppage_migratetype(page));
3111
3112        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3113        zone_statistics(preferred_zone, zone);
3114        local_irq_restore(flags);
3115
3116out:
3117        /* Separate test+clear to avoid unnecessary atomics */
3118        if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3119                clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3120                wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3121        }
3122
3123        VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3124        return page;
3125
3126failed:
3127        local_irq_restore(flags);
3128        return NULL;
3129}
3130
3131#ifdef CONFIG_FAIL_PAGE_ALLOC
3132
3133static struct {
3134        struct fault_attr attr;
3135
3136        bool ignore_gfp_highmem;
3137        bool ignore_gfp_reclaim;
3138        u32 min_order;
3139} fail_page_alloc = {
3140        .attr = FAULT_ATTR_INITIALIZER,
3141        .ignore_gfp_reclaim = true,
3142        .ignore_gfp_highmem = true,
3143        .min_order = 1,
3144};
3145
3146static int __init setup_fail_page_alloc(char *str)
3147{
3148        return setup_fault_attr(&fail_page_alloc.attr, str);
3149}
3150__setup("fail_page_alloc=", setup_fail_page_alloc);
3151
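    /* Decide whether fault injection should fail this allocation */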
3152static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3153{
3154        if (order < fail_page_alloc.min_order)
3155                return false;
3156        if (gfp_mask & __GFP_NOFAIL)
3157                return false;
3158        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3159                return false;
3160        if (fail_page_alloc.ignore_gfp_reclaim &&
3161                        (gfp_mask & __GFP_DIRECT_RECLAIM))
3162                return false;
3163
3164        return should_fail(&fail_page_alloc.attr, 1 << order);
3165}
3166
3167#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3168
3169static int __init fail_page_alloc_debugfs(void)
3170{
3171        umode_t mode = S_IFREG | 0600;
3172        struct dentry *dir;
3173
3174        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3175                                        &fail_page_alloc.attr);
3176        if (IS_ERR(dir))
3177                return PTR_ERR(dir);
3178
3179        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
3180                                &fail_page_alloc.ignore_gfp_reclaim))
3181                goto fail;
3182        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3183                                &fail_page_alloc.ignore_gfp_highmem))
3184                goto fail;
3185        if (!debugfs_create_u32("min-order", mode, dir,
3186                                &fail_page_alloc.min_order))
3187                goto fail;
3188
3189        return 0;
3190fail:
3191        debugfs_remove_recursive(dir);
3192
3193        return -ENOMEM;
3194}
3195
3196late_initcall(fail_page_alloc_debugfs);
3197
3198#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3199
3200#else /* CONFIG_FAIL_PAGE_ALLOC */
3201
3202static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3203{
3204        return false;
3205}
3206
3207#endif /* CONFIG_FAIL_PAGE_ALLOC */
3208
3209static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3210{
3211        return __should_fail_alloc_page(gfp_mask, order);
3212}
3213ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3214
3215/*
3216 * Return true if free base pages are above 'mark'. For high-order checks it
3217 * will return true if the order-0 watermark is reached and there is at least
3218 * one free page of a suitable size. Checking now avoids taking the zone lock
3219 * to check in the allocation paths if no pages are free.
3220 */
3221bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3222                         int classzone_idx, unsigned int alloc_flags,
3223                         long free_pages)
3224{
3225        long min = mark;
3226        int o;
3227        const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3228
3229        /* free_pages may go negative - that's OK */
3230        free_pages -= (1 << order) - 1;
3231
3232        if (alloc_flags & ALLOC_HIGH)
3233                min -= min / 2;
3234
3235        /*
3236         * If the caller does not have rights to ALLOC_HARDER then subtract
3237         * the high-atomic reserves. This will over-estimate the size of the
3238         * atomic reserve but it avoids a search.
3239         */
3240        if (likely(!alloc_harder)) {
3241                free_pages -= z->nr_reserved_highatomic;
3242        } else {
3243                /*
3244                 * OOM victims can try even harder than normal ALLOC_HARDER
3245                 * users on the grounds that it's definitely going to be in
3246                 * the exit path shortly and free memory. Any allocation it
3247                 * makes during the free path will be small and short-lived.
3248                 */
3249                if (alloc_flags & ALLOC_OOM)
3250                        min -= min / 2;
3251                else
3252                        min -= min / 4;
3253        }
3254
3255
3256#ifdef CONFIG_CMA
3257        /* If allocation can't use CMA areas don't use free CMA pages */
3258        if (!(alloc_flags & ALLOC_CMA))
3259                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3260#endif
3261
3262        /*
3263         * Check watermarks for an order-0 allocation request. If these
3264         * are not met, then a high-order request also cannot go ahead
3265         * even if a suitable page happened to be free.
3266         */
3267        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
3268                return false;
3269
3270        /* If this is an order-0 request then the watermark is fine */
3271        if (!order)
3272                return true;
3273
3274        /* For a high-order request, check at least one suitable page is free */
3275        for (o = order; o < MAX_ORDER; o++) {
3276                struct free_area *area = &z->free_area[o];
3277                int mt;
3278
3279                if (!area->nr_free)
3280                        continue;
3281
3282                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3283                        if (!list_empty(&area->free_list[mt]))
3284                                return true;
3285                }
3286
3287#ifdef CONFIG_CMA
3288                if ((alloc_flags & ALLOC_CMA) &&
3289                    !list_empty(&area->free_list[MIGRATE_CMA])) {
3290                        return true;
3291                }
3292#endif
3293                if (alloc_harder &&
3294                        !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
3295                        return true;
3296        }
3297        return false;
3298}
3299
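    /* Check the watermark against the zone's current NR_FREE_PAGES count */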
3300bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3301                      int classzone_idx, unsigned int alloc_flags)
3302{
3303        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3304                                        zone_page_state(z, NR_FREE_PAGES));
3305}
3306
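    /*
     * Fast order-0 watermark check that skips the full __zone_watermark_ok()
     * calculation when free pages are comfortably above the mark.
     */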
3307static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3308                unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3309{
3310        long free_pages = zone_page_state(z, NR_FREE_PAGES);
3311        long cma_pages = 0;
3312
3313#ifdef CONFIG_CMA
3314        /* If allocation can't use CMA areas don't use free CMA pages */
3315        if (!(alloc_flags & ALLOC_CMA))
3316                cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3317#endif
3318
3319        /*
3320         * Fast check for order-0 only. If this fails then the reserves
3321         * need to be calculated. There is a corner case where the check
3322         * passes but only the high-order atomic reserves are free. If
3323         * the caller is !atomic then it'll uselessly search the free
3324         * list. That corner case is then slower but it is harmless.
3325         */
3326        if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3327                return true;
3328
3329        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
3330                                        free_pages);
3331}
3332
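    /*
     * As zone_watermark_ok(), but use a snapshot of NR_FREE_PAGES when the
     * per-cpu counters may have drifted too far from the zone counter.
     */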
3333bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3334                        unsigned long mark, int classzone_idx)
3335{
3336        long free_pages = zone_page_state(z, NR_FREE_PAGES);
3337
3338        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3339                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3340
3341        return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
3342                                                                free_pages);
3343}
3344
3345#ifdef CONFIG_NUMA
3346static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3347{
3348        return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3349                                RECLAIM_DISTANCE;
3350}
3351#else   /* CONFIG_NUMA */
3352static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3353{
3354        return true;
3355}
3356#endif  /* CONFIG_NUMA */
3357
3358/*
3359 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3360 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3361 * premature use of a lower zone may cause lowmem pressure problems that
3362 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3363 * probably too small. It only makes sense to spread allocations to avoid
3364 * fragmentation between the Normal and DMA32 zones.
3365 */
3366static inline unsigned int
3367alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3368{
3369        unsigned int alloc_flags = 0;
3370
3371        if (gfp_mask & __GFP_KSWAPD_RECLAIM)
3372                alloc_flags |= ALLOC_KSWAPD;
3373
3374#ifdef CONFIG_ZONE_DMA32
3375        if (zone_idx(zone) != ZONE_NORMAL)
3376                goto out;
3377
3378        /*
3379         * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3380         * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3381         * on UMA that if Normal is populated then so is DMA32.
3382         */
3383        BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3384        if (nr_online_nodes > 1 && !populated_zone(--zone))
3385                goto out;
3386
            alloc_flags |= ALLOC_NOFRAGMENT;
3387out:
3388#endif /* CONFIG_ZONE_DMA32 */
3389        return alloc_flags;
3390}
3391
3392/*
3393 * get_page_from_freelist goes through the zonelist trying to allocate
3394 * a page.
3395 */
3396static struct page *
3397get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3398                                                const struct alloc_context *ac)
3399{
3400        struct zoneref *z;
3401        struct zone *zone;
3402        struct pglist_data *last_pgdat_dirty_limit = NULL;
3403        bool no_fallback;
3404
3405retry:
3406        /*
3407         * Scan zonelist, looking for a zone with enough free.
3408         * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3409         */
3410        no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3411        z = ac->preferred_zoneref;
3412        for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3413                                                                ac->nodemask) {
3414                struct page *page;
3415                unsigned long mark;
3416
3417                if (cpusets_enabled() &&
3418                        (alloc_flags & ALLOC_CPUSET) &&
3419                        !__cpuset_zone_allowed(zone, gfp_mask))
3420                                continue;
3421                /*
3422                 * When allocating a page cache page for writing, we
3423                 * want to get it from a node that is within its dirty
3424                 * limit, such that no single node holds more than its
3425                 * proportional share of globally allowed dirty pages.
3426                 * The dirty limits take into account the node's
3427                 * lowmem reserves and high watermark so that kswapd
3428                 * should be able to balance it without having to
3429                 * write pages from its LRU list.
3430                 *
3431                 * XXX: For now, allow allocations to potentially
3432                 * exceed the per-node dirty limit in the slowpath
3433                 * (spread_dirty_pages unset) before going into reclaim,
3434                 * which is important when on a NUMA setup the allowed
3435                 * nodes are together not big enough to reach the
3436                 * global limit.  The proper fix for these situations
3437                 * will require awareness of nodes in the
3438                 * dirty-throttling and the flusher threads.
3439                 */
3440                if (ac->spread_dirty_pages) {
3441                        if (last_pgdat_dirty_limit == zone->zone_pgdat)
3442                                continue;
3443
3444                        if (!node_dirty_ok(zone->zone_pgdat)) {
3445                                last_pgdat_dirty_limit = zone->zone_pgdat;
3446                                continue;
3447                        }
3448                }
3449
3450                if (no_fallback && nr_online_nodes > 1 &&
3451                    zone != ac->preferred_zoneref->zone) {
3452                        int local_nid;
3453
3454                        /*
3455                         * If moving to a remote node, retry but allow
3456                         * fragmenting fallbacks. Locality is more important
3457                         * than fragmentation avoidance.
3458                         */
3459                        local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3460                        if (zone_to_nid(zone) != local_nid) {
3461                                alloc_flags &= ~ALLOC_NOFRAGMENT;
3462                                goto retry;
3463                        }
3464                }
3465
3466                mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3467                if (!zone_watermark_fast(zone, order, mark,
3468                                       ac_classzone_idx(ac), alloc_flags)) {
3469                        int ret;
3470
3471#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3472                        /*
3473                         * Watermark failed for this zone, but see if we can
3474                         * grow this zone if it contains deferred pages.
3475                         */
3476                        if (static_branch_unlikely(&deferred_pages)) {
3477                                if (_deferred_grow_zone(zone, order))
3478                                        goto try_this_zone;
3479                        }
3480#endif
3481                        /* Checked here to keep the fast path fast */
3482                        BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3483                        if (alloc_flags & ALLOC_NO_WATERMARKS)
3484                                goto try_this_zone;
3485
3486                        if (node_reclaim_mode == 0 ||
3487                            !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3488                                continue;
3489
3490                        ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3491                        switch (ret) {
3492                        case NODE_RECLAIM_NOSCAN:
3493                                /* did not scan */
3494                                continue;
3495                        case NODE_RECLAIM_FULL:
3496                                /* scanned but unreclaimable */
3497                                continue;
3498                        default:
3499                                /* did we reclaim enough */
3500                                if (zone_watermark_ok(zone, order, mark,
3501                                                ac_classzone_idx(ac), alloc_flags))
3502                                        goto try_this_zone;
3503
3504                                continue;
3505                        }
3506                }
3507
3508try_this_zone:
3509                page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3510                                gfp_mask, alloc_flags, ac->migratetype);
3511                if (page) {
3512                        prep_new_page(page, order, gfp_mask, alloc_flags);
3513
3514                        /*
3515                         * If this is a high-order atomic allocation then check
3516                         * if the pageblock should be reserved for the future
3517                         */
3518                        if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3519                                reserve_highatomic_pageblock(page, zone, order);
3520
3521                        return page;
3522                } else {
3523#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3524                        /* Try again if zone has deferred pages */
3525                        if (static_branch_unlikely(&deferred_pages)) {
3526                                if (_deferred_grow_zone(zone, order))
3527                                        goto try_this_zone;
3528                        }
3529#endif
3530                }
3531        }
3532
3533        /*
3534         * It's possible on a UMA machine to get through all zones that are
3535         * fragmented. If avoiding fragmentation, reset and try again.
3536         */
3537        if (no_fallback) {
3538                alloc_flags &= ~ALLOC_NOFRAGMENT;
3539                goto retry;
3540        }
3541
3542        return NULL;
3543}
3544
3545static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3546{
3547        unsigned int filter = SHOW_MEM_FILTER_NODES;
3548        static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3549
3550        if (!__ratelimit(&show_mem_rs))
3551                return;
3552
3553        /*
3554         * This documents exceptions given to allocations in certain
3555         * contexts that are allowed to allocate outside current's set
3556         * of allowed nodes.
3557         */
3558        if (!(gfp_mask & __GFP_NOMEMALLOC))
3559                if (tsk_is_oom_victim(current) ||
3560                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3561                        filter &= ~SHOW_MEM_FILTER_NODES;
3562        if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3563                filter &= ~SHOW_MEM_FILTER_NODES;
3564
3565        show_mem(filter, nodemask);
3566}
3567
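    /*
     * Rate-limited allocation failure warning: dumps the gfp mask, nodemask,
     * the current cpuset, a stack trace and a filtered show_mem() report.
     */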
3568void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3569{
3570        struct va_format vaf;
3571        va_list args;
3572        static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
3573                                      DEFAULT_RATELIMIT_BURST);
3574
3575        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3576                return;
3577
3578        va_start(args, fmt);
3579        vaf.fmt = fmt;
3580        vaf.va = &args;
3581        pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3582                        current->comm, &vaf, gfp_mask, &gfp_mask,
3583                        nodemask_pr_args(nodemask));
3584        va_end(args);
3585
3586        cpuset_print_current_mems_allowed();
3587        pr_cont("\n");
3588        dump_stack();
3589        warn_alloc_show_mem(gfp_mask, nodemask);
3590}
3591
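    /*
     * Try the allocation with cpuset constraints enforced, then retry while
     * ignoring them if the allowed nodes are depleted.
     */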
3592static inline struct page *
3593__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3594                              unsigned int alloc_flags,
3595                              const struct alloc_context *ac)
3596{
3597        struct page *page;
3598
3599        page = get_page_from_freelist(gfp_mask, order,
3600                        alloc_flags|ALLOC_CPUSET, ac);
3601        /*
3602         * fallback to ignore cpuset restriction if our nodes
3603         * are depleted
3604         */
3605        if (!page)
3606                page = get_page_from_freelist(gfp_mask, order,
3607                                alloc_flags, ac);
3608
3609        return page;
3610}
3611
3612static inline struct page *
3613__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3614        const struct alloc_context *ac, unsigned long *did_some_progress)
3615{
3616        struct oom_control oc = {
3617                .zonelist = ac->zonelist,
3618                .nodemask = ac->nodemask,
3619                .memcg = NULL,
3620                .gfp_mask = gfp_mask,
3621                .order = order,
3622        };
3623        struct page *page;
3624
3625        *did_some_progress = 0;
3626
3627        /*
3628         * Acquire the oom lock.  If that fails, somebody else is
3629         * making progress for us.
3630         */
3631        if (!mutex_trylock(&oom_lock)) {
3632                *did_some_progress = 1;
3633                schedule_timeout_uninterruptible(1);
3634                return NULL;
3635        }
3636
3637        /*
3638         * Go through the zonelist yet one more time, keep very high watermark
3639         * here, this is only to catch a parallel oom killing, we must fail if
3640         * we're still under heavy pressure. But make sure that this reclaim
3641         * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3642         * allocation which will never fail due to oom_lock already held.
3643         */
3644        page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3645                                      ~__GFP_DIRECT_RECLAIM, order,
3646                                      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3647        if (page)
3648                goto out;
3649
3650        /* Coredumps can quickly deplete all memory reserves */
3651        if (current->flags & PF_DUMPCORE)
3652                goto out;
3653        /* The OOM killer will not help higher order allocs */
3654        if (order > PAGE_ALLOC_COSTLY_ORDER)
3655                goto out;
3656        /*
3657         * We have already exhausted all our reclaim opportunities without any
3658         * success so it is time to admit defeat. We will skip the OOM killer
3659         * because it is very likely that the caller has a more reasonable
3660         * fallback than shooting a random task.
3661         */
3662        if (gfp_mask & __GFP_RETRY_MAYFAIL)
3663                goto out;
3664        /* The OOM killer does not needlessly kill tasks for lowmem */
3665        if (ac->high_zoneidx < ZONE_NORMAL)
3666                goto out;
3667        if (pm_suspended_storage())
3668                goto out;
3669        /*
3670         * XXX: GFP_NOFS allocations should rather fail than rely on
3671         * other request to make a forward progress.
3672         * We are in an unfortunate situation where out_of_memory cannot
3673         * do much for this context but let's try it to at least get
3674         * access to memory reserved if the current task is killed (see
3675         * out_of_memory). Once filesystems are ready to handle allocation
3676         * failures more gracefully we should just bail out here.
3677         */
3678
3679        /* The OOM killer may not free memory on a specific node */
3680        if (gfp_mask & __GFP_THISNODE)
3681                goto out;
3682
3683        /* Exhausted what can be done so it's blame time */
3684        if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
3685                *did_some_progress = 1;
3686
3687                /*
3688                 * Help non-failing allocations by giving them access to memory
3689                 * reserves
3690                 */
3691                if (gfp_mask & __GFP_NOFAIL)
3692                        page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3693                                        ALLOC_NO_WATERMARKS, ac);
3694        }
3695out:
3696        mutex_unlock(&oom_lock);
3697        return page;
3698}
3699
3700/*
3701 * Maximum number of compaction retries with progress before the OOM
3702 * killer is considered the only way to move forward.
3703 */
3704#define MAX_COMPACT_RETRIES 16
3705
3706#ifdef CONFIG_COMPACTION
3707/* Try memory compaction for high-order allocations before reclaim */
3708static struct page *
3709__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3710                unsigned int alloc_flags, const struct alloc_context *ac,
3711                enum compact_priority prio, enum compact_result *compact_result)
3712{
3713        struct page *page;
3714        unsigned long pflags;
3715        unsigned int noreclaim_flag;
3716
3717        if (!order)
3718                return NULL;
3719
3720        psi_memstall_enter(&pflags);
3721        noreclaim_flag = memalloc_noreclaim_save();
3722
3723        *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3724                                                                        prio);
3725
3726        memalloc_noreclaim_restore(noreclaim_flag);
3727        psi_memstall_leave(&pflags);
3728
3729        if (*compact_result <= COMPACT_INACTIVE)
3730                return NULL;
3731
3732        /*
3733         * At least in one zone compaction wasn't deferred or skipped, so let's
3734         * count a compaction stall
3735         */
3736        count_vm_event(COMPACTSTALL);
3737
3738        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3739
3740        if (page) {
3741                struct zone *zone = page_zone(page);
3742
3743                zone->compact_blockskip_flush = false;
3744                compaction_defer_reset(zone, order, true);
3745                count_vm_event(COMPACTSUCCESS);
3746                return page;
3747        }
3748
3749        /*
3750         * It's bad if a compaction run occurs and fails. The most likely reason
3751         * is that pages exist, but not enough to satisfy watermarks.
3752         */
3753        count_vm_event(COMPACTFAIL);
3754
3755        cond_resched();
3756
3757        return NULL;
3758}
3759
3760static inline bool
3761should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3762                     enum compact_result compact_result,
3763                     enum compact_priority *compact_priority,
3764                     int *compaction_retries)
3765{
3766        int max_retries = MAX_COMPACT_RETRIES;
3767        int min_priority;
3768        bool ret = false;
3769        int retries = *compaction_retries;
3770        enum compact_priority priority = *compact_priority;
3771
3772        if (!order)
3773                return false;
3774
3775        if (compaction_made_progress(compact_result))
3776                (*compaction_retries)++;
3777
3778        /*
3779         * compaction considers all the zones as desperately out of memory
3780         * so it doesn't really make much sense to retry except when the
3781         * failure could be caused by insufficient priority
3782         */
3783        if (compaction_failed(compact_result))
3784                goto check_priority;
3785
3786        /*
3787         * make sure the compaction wasn't deferred or didn't bail out early
3788         * due to lock contention before we declare that we should give up.
3789         * But do not retry if the given zonelist is not suitable for
3790         * compaction.
3791         */
3792        if (compaction_withdrawn(compact_result)) {
3793                ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3794                goto out;
3795        }
3796
3797        /*
3798         * !costly requests are much more important than __GFP_RETRY_MAYFAIL
3799         * costly ones because they are de facto nofail and invoke OOM
3800         * killer to move on while costly ones can fail and users are ready
3801         * to cope with that. 1/4 retries is rather arbitrary but we
3802         * would need much more detailed feedback from compaction to
3803         * make a better decision.
3804         */
3805        if (order > PAGE_ALLOC_COSTLY_ORDER)
3806                max_retries /= 4;
3807        if (*compaction_retries <= max_retries) {
3808                ret = true;
3809                goto out;
3810        }
3811
3812        /*
3813         * Make sure there are attempts at the highest priority if we exhausted
3814         * all retries or failed at the lower priorities.
3815         */
3816check_priority:
3817        min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3818                        MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3819
3820        if (*compact_priority > min_priority) {
3821                (*compact_priority)--;
3822                *compaction_retries = 0;
3823                ret = true;
3824        }
3825out:
3826        trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3827        return ret;
3828}
3829#else
3830static inline struct page *
3831__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3832                unsigned int alloc_flags, const struct alloc_context *ac,
3833                enum compact_priority prio, enum compact_result *compact_result)
3834{
3835        *compact_result = COMPACT_SKIPPED;
3836        return NULL;
3837}
3838
3839static inline bool
3840should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3841                     enum compact_result compact_result,
3842                     enum compact_priority *compact_priority,
3843                     int *compaction_retries)
3844{
3845        struct zone *zone;
3846        struct zoneref *z;
3847
3848        if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3849                return false;
3850
3851        /*
3852         * There are setups with compaction disabled which would prefer to loop
3853         * inside the allocator rather than hit the oom killer prematurely.
3854         * Let's give them a good hope and keep retrying while the order-0
3855         * watermarks are OK.
3856         */
3857        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
3858                                        ac->nodemask) {
3859                if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3860                                        ac_classzone_idx(ac), alloc_flags))
3861                        return true;
3862        }
3863        return false;
3864}
3865#endif /* CONFIG_COMPACTION */
3866
3867#ifdef CONFIG_LOCKDEP
3868static struct lockdep_map __fs_reclaim_map =
3869        STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3870
3871static bool __need_fs_reclaim(gfp_t gfp_mask)
3872{
3873        gfp_mask = current_gfp_context(gfp_mask);
3874
3875        /* no reclaim without waiting on it */
3876        if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3877                return false;
3878
3879        /* this guy won't enter reclaim */
3880        if (current->flags & PF_MEMALLOC)
3881                return false;
3882
3883        /* We're only interested in __GFP_FS allocations for now */
3884        if (!(gfp_mask & __GFP_FS))
3885                return false;
3886
3887        if (gfp_mask & __GFP_NOLOCKDEP)
3888                return false;
3889
3890        return true;
3891}
3892
3893void __fs_reclaim_acquire(void)
3894{
3895        lock_map_acquire(&__fs_reclaim_map);
3896}
3897
3898void __fs_reclaim_release(void)
3899{
3900        lock_map_release(&__fs_reclaim_map);
3901}
3902
3903void fs_reclaim_acquire(gfp_t gfp_mask)
3904{
3905        if (__need_fs_reclaim(gfp_mask))
3906                __fs_reclaim_acquire();
3907}
3908EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3909
3910void fs_reclaim_release(gfp_t gfp_mask)
3911{
3912        if (__need_fs_reclaim(gfp_mask))
3913                __fs_reclaim_release();
3914}
3915EXPORT_SYMBOL_GPL(fs_reclaim_release);
3916#endif
3917
3918/* Perform direct synchronous page reclaim */
3919static int
3920__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3921                                        const struct alloc_context *ac)
3922{
3923        struct reclaim_state reclaim_state;
3924        int progress;
3925        unsigned int noreclaim_flag;
3926        unsigned long pflags;
3927
3928        cond_resched();
3929
3930        /* We now go into synchronous reclaim */
3931        cpuset_memory_pressure_bump();
3932        psi_memstall_enter(&pflags);
3933        fs_reclaim_acquire(gfp_mask);
3934        noreclaim_flag = memalloc_noreclaim_save();
3935        reclaim_state.reclaimed_slab = 0;
3936        current->reclaim_state = &reclaim_state;
3937
3938        progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3939                                                                ac->nodemask);
3940
3941        current->reclaim_state = NULL;
3942        memalloc_noreclaim_restore(noreclaim_flag);
3943        fs_reclaim_release(gfp_mask);
3944        psi_memstall_leave(&pflags);
3945
3946        cond_resched();
3947
3948        return progress;
3949}
3950
3951/* The really slow allocator path where we enter direct reclaim */
3952static inline struct page *
3953__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3954                unsigned int alloc_flags, const struct alloc_context *ac,
3955                unsigned long *did_some_progress)
3956{
3957        struct page *page = NULL;
3958        bool drained = false;
3959
3960        *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3961        if (unlikely(!(*did_some_progress)))
3962                return NULL;
3963
3964retry:
3965        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3966
3967        /*
3968         * If an allocation failed after direct reclaim, it could be because
3969         * pages are pinned on the per-cpu lists or in high alloc reserves.
3970         * Shrink them and try again
3971         */
3972        if (!page && !drained) {
3973                unreserve_highatomic_pageblock(ac, false);
3974                drain_all_pages(NULL);
3975                drained = true;
3976                goto retry;
3977        }
3978
3979        return page;
3980}
3981
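    /*
     * Wake kswapd for each node with eligible zones in the zonelist, skipping
     * repeat wakeups for consecutive zones of the same node.
     */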
3982static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3983                             const struct alloc_context *ac)
3984{
3985        struct zoneref *z;
3986        struct zone *zone;
3987        pg_data_t *last_pgdat = NULL;
3988        enum zone_type high_zoneidx = ac->high_zoneidx;
3989
3990        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
3991                                        ac->nodemask) {
3992                if (last_pgdat != zone->zone_pgdat)
3993                        wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
3994                last_pgdat = zone->zone_pgdat;
3995        }
3996}
3997
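    /*
     * Translate the gfp mask into the internal ALLOC_* flags used by the
     * slow path.
     */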
3998static inline unsigned int
3999gfp_to_alloc_flags(gfp_t gfp_mask)
4000{
4001        unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4002
4003        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
4004        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4005
4006        /*
4007         * The caller may dip into page reserves a bit more if the caller
4008         * cannot run direct reclaim, or if the caller has realtime scheduling
4009         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4010         * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4011         */
4012        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
4013
4014        if (gfp_mask & __GFP_ATOMIC) {
4015                /*
4016                 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4017                 * if it can't schedule.
4018                 */
4019                if (!(gfp_mask & __GFP_NOMEMALLOC))
4020                        alloc_flags |= ALLOC_HARDER;
4021                /*
4022                 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4023                 * comment for __cpuset_node_allowed().
4024                 */
4025                alloc_flags &= ~ALLOC_CPUSET;
4026        } else if (unlikely(rt_task(current)) && !in_interrupt())
4027                alloc_flags |= ALLOC_HARDER;
4028
4029        if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4030                alloc_flags |= ALLOC_KSWAPD;
4031
4032#ifdef CONFIG_CMA
4033        if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
4034                alloc_flags |= ALLOC_CMA;
4035#endif
4036        return alloc_flags;
4037}
4038
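    /*
     * Return true if @tsk is an OOM victim that is entitled to dip into
     * the OOM memory reserves.
     */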
4039static bool oom_reserves_allowed(struct task_struct *tsk)
4040{
4041        if (!tsk_is_oom_victim(tsk))
4042                return false;
4043
4044        /*
4045         * !MMU doesn't have oom reaper so give access to memory reserves
4046         * only to the thread with TIF_MEMDIE set
4047         */
4048        if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4049                return false;
4050
4051        return true;
4052}
4053
4054/*
4055 * Distinguish requests which really need access to full memory
4056 * reserves from oom victims which can live with a portion of it
4057 */
4058static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4059{
4060        if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4061                return 0;
4062        if (gfp_mask & __GFP_MEMALLOC)
4063                return ALLOC_NO_WATERMARKS;
4064        if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4065                return ALLOC_NO_WATERMARKS;
4066        if (!in_interrupt()) {
4067                if (current->flags & PF_MEMALLOC)
4068                        return ALLOC_NO_WATERMARKS;
4069                else if (oom_reserves_allowed(current))
4070                        return ALLOC_OOM;
4071        }
4072
4073        return 0;
4074}
4075
4076bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4077{
4078        return !!__gfp_pfmemalloc_flags(gfp_mask);
4079}
4080
4081/*
4082 * Checks whether it makes sense to retry the reclaim to make a forward progress
4083 * for the given allocation request.
4084 *
4085 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4086 * without success, or when we couldn't even meet the watermark if we
4087 * reclaimed all remaining pages on the LRU lists.
4088 *
4089 * Returns true if a retry is viable or false to enter the oom path.
4090 */
4091static inline bool
4092should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4093                     struct alloc_context *ac, int alloc_flags,
4094                     bool did_some_progress, int *no_progress_loops)
4095{
4096        struct zone *zone;
4097        struct zoneref *z;
4098        bool ret = false;
4099
4100        /*
4101         * Costly allocations might have made progress but this doesn't mean
4102         * their order will become available due to high fragmentation so
4103         * always increment the no progress counter for them
4104         */
4105        if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4106                *no_progress_loops = 0;
4107        else
4108                (*no_progress_loops)++;
4109
4110        /*
4111         * Make sure we converge to OOM if we cannot make any progress
4112         * several times in a row.
4113         */
4114        if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4115                /* Before OOM, exhaust highatomic_reserve */
4116                return unreserve_highatomic_pageblock(ac, true);
4117        }
4118
4119        /*
4120         * Keep reclaiming pages while there is a chance this will lead
4121         * somewhere.  If none of the target zones can satisfy our allocation
4122         * request even if all reclaimable pages are considered then we are
4123         * screwed and have to go OOM.
4124         */
4125        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
4126                                        ac->nodemask) {
4127                unsigned long available;
4128                unsigned long reclaimable;
4129                unsigned long min_wmark = min_wmark_pages(zone);
4130                bool wmark;
4131
4132                available = reclaimable = zone_reclaimable_pages(zone);
4133                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4134
4135                /*
4136                 * Would the allocation succeed if we reclaimed all
4137                 * reclaimable pages?
4138                 */
4139                wmark = __zone_watermark_ok(zone, order, min_wmark,
4140                                ac_classzone_idx(ac), alloc_flags, available);
4141                trace_reclaim_retry_zone(z, order, reclaimable,
4142                                available, min_wmark, *no_progress_loops, wmark);
4143                if (wmark) {
4144                        /*
4145                         * If we didn't make any progress and have a lot of
4146                         * dirty + writeback pages then we should wait for
4147                         * an IO to complete to slow down the reclaim and
4148                         * prevent premature OOM
4149                         */
4150                        if (!did_some_progress) {
4151                                unsigned long write_pending;
4152
4153                                write_pending = zone_page_state_snapshot(zone,
4154                                                        NR_ZONE_WRITE_PENDING);
4155
4156                                if (2 * write_pending > reclaimable) {
4157                                        congestion_wait(BLK_RW_ASYNC, HZ/10);
4158                                        return true;
4159                                }
4160                        }
4161
4162                        ret = true;
4163                        goto out;
4164                }
4165        }
4166
4167out:
4168        /*
4169         * Memory allocation/reclaim might be called from a WQ context and the
4170         * current implementation of the WQ concurrency control doesn't
4171         * recognize that a particular WQ is congested if the worker thread is
4172         * looping without ever sleeping. Therefore we have to do a short sleep
4173         * here rather than calling cond_resched().
4174         */
4175        if (current->flags & PF_WQ_WORKER)
4176                schedule_timeout_uninterruptible(1);
4177        else
4178                cond_resched();
4179        return ret;
4180}
4181
4182static inline bool
4183check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4184{
4185        /*
4186         * It's possible that cpuset's mems_allowed and the nodemask from
4187         * mempolicy don't intersect. This should normally be dealt with by
4188         * policy_nodemask(), but it's possible to race with cpuset update in
4189         * such a way the check therein was true, and then it became false
4190         * before we got our cpuset_mems_cookie here.
4191         * This assumes that for all allocations, ac->nodemask can come only
4192         * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4193         * when it does not intersect with the cpuset restrictions) or the
4194         * caller can deal with a violated nodemask.
4195         */
4196        if (cpusets_enabled() && ac->nodemask &&
4197                        !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4198                ac->nodemask = NULL;
4199                return true;
4200        }
4201
4202        /*
4203         * When updating a task's mems_allowed or mempolicy nodemask, it is
4204         * possible to race with parallel threads in such a way that our
4205         * allocation can fail while the mask is being updated. If we are about
4206         * to fail, check if the cpuset changed during allocation and if so,
4207         * retry.
4208         */
4209        if (read_mems_allowed_retry(cpuset_mems_cookie))
4210                return true;
4211
4212        return false;
4213}
4214
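    /*
     * The slow path: wake kswapd, then retry with direct compaction, direct
     * reclaim and finally the OOM killer until the allocation succeeds or
     * the gfp flags tell us to give up.
     */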
4215static inline struct page *
4216__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4217                                                struct alloc_context *ac)
4218{
4219        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4220        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4221        struct page *page = NULL;
4222        unsigned int alloc_flags;
4223        unsigned long did_some_progress;
4224        enum compact_priority compact_priority;
4225        enum compact_result compact_result;
4226        int compaction_retries;
4227        int no_progress_loops;
4228        unsigned int cpuset_mems_cookie;
4229        int reserve_flags;
4230
4231        /*
4232         * We also sanity check to catch abuse of atomic reserves being used by
4233         * callers that are not in atomic context.
4234         */
4235        if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4236                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4237                gfp_mask &= ~__GFP_ATOMIC;
4238
4239retry_cpuset:
4240        compaction_retries = 0;
4241        no_progress_loops = 0;
4242        compact_priority = DEF_COMPACT_PRIORITY;
4243        cpuset_mems_cookie = read_mems_allowed_begin();
4244
4245        /*
4246         * The fast path uses conservative alloc_flags to succeed only until
4247         * kswapd needs to be woken up, and to avoid the cost of setting up
4248         * alloc_flags precisely. So we do that now.
4249         */
4250        alloc_flags = gfp_to_alloc_flags(gfp_mask);
4251
4252        /*
4253         * We need to recalculate the starting point for the zonelist iterator
4254         * because we might have used different nodemask in the fast path, or
4255         * there was a cpuset modification and we are retrying - otherwise we
4256         * could end up iterating over non-eligible zones endlessly.
4257         */
4258        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4259                                        ac->high_zoneidx, ac->nodemask);
4260        if (!ac->preferred_zoneref->zone)
4261                goto nopage;
4262
4263        if (alloc_flags & ALLOC_KSWAPD)
4264                wake_all_kswapds(order, gfp_mask, ac);
4265
4266        /*
4267         * The adjusted alloc_flags might result in immediate success, so try
4268         * that first
4269         */
4270        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4271        if (page)
4272                goto got_pg;
4273
4274        /*
4275         * For costly allocations, try direct compaction first, as it's likely
4276         * that we have enough base pages and don't need to reclaim. For non-
4277         * movable high-order allocations, do that as well, as compaction will
4278         * try prevent permanent fragmentation by migrating from blocks of the
4279         * same migratetype.
4280         * Don't try this for allocations that are allowed to ignore
4281         * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4282         */
4283        if (can_direct_reclaim &&
4284                        (costly_order ||
4285                           (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4286                        && !gfp_pfmemalloc_allowed(gfp_mask)) {
4287                page = __alloc_pages_direct_compact(gfp_mask, order,
4288                                                alloc_flags, ac,
4289                                                INIT_COMPACT_PRIORITY,
4290                                                &compact_result);
4291                if (page)
4292                        goto got_pg;
4293
4294                /*
4295                 * Checks for costly allocations with __GFP_NORETRY, which
4296                 * includes THP page fault allocations
4297                 */
4298                if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4299                        /*
4300                         * If compaction is deferred for high-order allocations,
4301                         * it is because sync compaction recently failed. If
4302                         * this is the case and the caller requested a THP
4303                         * allocation, we do not want to heavily disrupt the
4304                         * system, so we fail the allocation instead of entering
4305                         * direct reclaim.
4306                         */
4307                        if (compact_result == COMPACT_DEFERRED)
4308                                goto nopage;
4309
4310                        /*
4311                         * Looks like reclaim/compaction is worth trying, but
4312                         * sync compaction could be very expensive, so keep
4313                         * using async compaction.
4314                         */
4315                        compact_priority = INIT_COMPACT_PRIORITY;
4316                }
4317        }
4318
4319retry:
4320        /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4321        if (alloc_flags & ALLOC_KSWAPD)
4322                wake_all_kswapds(order, gfp_mask, ac);
4323
4324        reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4325        if (reserve_flags)
4326                alloc_flags = reserve_flags;
4327
4328        /*
4329         * Reset the nodemask and zonelist iterators if memory policies can be
4330         * ignored. These allocations are high priority and system rather than
4331         * user oriented.
4332         */
4333        if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4334                ac->nodemask = NULL;
4335                ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4336                                        ac->high_zoneidx, ac->nodemask);
4337        }
4338
4339        /* Attempt with potentially adjusted zonelist and alloc_flags */
4340        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4341        if (page)
4342                goto got_pg;
4343
4344        /* Caller is not willing to reclaim, we can't balance anything */
4345        if (!can_direct_reclaim)
4346                goto nopage;
4347
4348        /* Avoid recursion of direct reclaim */
4349        if (current->flags & PF_MEMALLOC)
4350                goto nopage;
4351
4352        /* Try direct reclaim and then allocating */
4353        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4354                                                        &did_some_progress);
4355        if (page)
4356                goto got_pg;
4357
4358        /* Try direct compaction and then allocating */
4359        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4360                                        compact_priority, &compact_result);
4361        if (page)
4362                goto got_pg;
4363
4364        /* Do not loop if specifically requested */
4365        if (gfp_mask & __GFP_NORETRY)
4366                goto nopage;
4367
4368        /*
4369         * Do not retry costly high order allocations unless they are
4370         * __GFP_RETRY_MAYFAIL
4371         */
4372        if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4373                goto nopage;
4374
4375        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4376                                 did_some_progress > 0, &no_progress_loops))
4377                goto retry;
4378
4379        /*
4380         * It doesn't make any sense to retry compaction if order-0 reclaim is
4381         * not able to make any progress, because the current implementation of
4382         * compaction depends on a sufficient amount of free memory
4383         * (see __compaction_suitable).
4384         */
4385        if (did_some_progress > 0 &&
4386                        should_compact_retry(ac, order, alloc_flags,
4387                                compact_result, &compact_priority,
4388                                &compaction_retries))
4389                goto retry;
4390
4391
4392        /* Deal with possible cpuset update races before we start OOM killing */
4393        if (check_retry_cpuset(cpuset_mems_cookie, ac))
4394                goto retry_cpuset;
4395
4396        /* Reclaim has failed us, start killing things */
4397        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4398        if (page)
4399                goto got_pg;
4400
4401        /* Avoid allocations with no watermarks from looping endlessly */
4402        if (tsk_is_oom_victim(current) &&
4403            (alloc_flags == ALLOC_OOM ||
4404             (gfp_mask & __GFP_NOMEMALLOC)))
4405                goto nopage;
4406
4407        /* Retry as long as the OOM killer is making progress */
4408        if (did_some_progress) {
4409                no_progress_loops = 0;
4410                goto retry;
4411        }
4412
4413nopage:
4414        /* Deal with possible cpuset update races before we fail */
4415        if (check_retry_cpuset(cpuset_mems_cookie, ac))
4416                goto retry_cpuset;
4417
4418        /*
4419         * Make sure that a __GFP_NOFAIL request doesn't leak out and that
4420         * we always retry.
4421         */
4422        if (gfp_mask & __GFP_NOFAIL) {
4423                /*
4424                 * All existing users of __GFP_NOFAIL are blockable, so warn
4425                 * about any new users that actually require GFP_NOWAIT.
4426                 */
4427                if (WARN_ON_ONCE(!can_direct_reclaim))
4428                        goto fail;
4429
4430                /*
4431                 * A PF_MEMALLOC request from this context is rather bizarre
4432                 * because we cannot reclaim anything and can only loop waiting
4433                 * for somebody to do the work for us.
4434                 */
4435                WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4436
4437                /*
4438                 * Non-failing costly orders are a hard requirement which we
4439                 * are not well prepared for, so warn about these users so
4440                 * that we can identify them and convert them to something
4441                 * else.
4442                 */
4443                WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4444
4445                /*
4446                 * Help non-failing allocations by giving them access to memory
4447                 * reserves but do not use ALLOC_NO_WATERMARKS because this
4448                 * could deplete the whole memory reserves which would just make
4449                 * the situation worse
4450                 */
4451                page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4452                if (page)
4453                        goto got_pg;
4454
4455                cond_resched();
4456                goto retry;
4457        }
4458fail:
4459        warn_alloc(gfp_mask, ac->nodemask,
4460                        "page allocation failure: order:%u", order);
4461got_pg:
4462        return page;
4463}
4464
4465static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4466                int preferred_nid, nodemask_t *nodemask,
4467                struct alloc_context *ac, gfp_t *alloc_mask,
4468                unsigned int *alloc_flags)
4469{
4470        ac->high_zoneidx = gfp_zone(gfp_mask);
4471        ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4472        ac->nodemask = nodemask;
4473        ac->migratetype = gfpflags_to_migratetype(gfp_mask);
4474
4475        if (cpusets_enabled()) {
4476                *alloc_mask |= __GFP_HARDWALL;
4477                if (!ac->nodemask)
4478                        ac->nodemask = &cpuset_current_mems_allowed;
4479                else
4480                        *alloc_flags |= ALLOC_CPUSET;
4481        }
4482
4483        fs_reclaim_acquire(gfp_mask);
4484        fs_reclaim_release(gfp_mask);
4485
4486        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4487
4488        if (should_fail_alloc_page(gfp_mask, order))
4489                return false;
4490
4491        if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4492                *alloc_flags |= ALLOC_CMA;
4493
4494        return true;
4495}
4496
4497/* Determine whether to spread dirty pages and what the first usable zone is */
4498static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
4499{
4500        /* Dirty zone balancing only done in the fast path */
4501        ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4502
4503        /*
4504         * The preferred zone is used for statistics but crucially it is
4505         * also used as the starting point for the zonelist iterator. It
4506         * may get reset for allocations that ignore memory policies.
4507         */
4508        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4509                                        ac->high_zoneidx, ac->nodemask);
4510}
4511
4512/*
4513 * This is the 'heart' of the zoned buddy allocator.
4514 */
4515struct page *
4516__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4517                                                        nodemask_t *nodemask)
4518{
4519        struct page *page;
4520        unsigned int alloc_flags = ALLOC_WMARK_LOW;
4521        gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4522        struct alloc_context ac = { };
4523
4524        /*
4525         * There are several places where we assume that the order value is sane
4526         * so bail out early if the request is out of bounds.
4527         */
4528        if (unlikely(order >= MAX_ORDER)) {
4529                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4530                return NULL;
4531        }
4532
4533        gfp_mask &= gfp_allowed_mask;
4534        alloc_mask = gfp_mask;
4535        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4536                return NULL;
4537
4538        finalise_ac(gfp_mask, &ac);
4539
4540        /*
4541         * Forbid the first pass from falling back to types that fragment
4542         * memory until all local zones are considered.
4543         */
4544        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
4545
4546        /* First allocation attempt */
4547        page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4548        if (likely(page))
4549                goto out;
4550
4551        /*
4552         * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4553         * resp. GFP_NOIO which has to be inherited for all allocation requests
4554         * from a particular context which has been marked by
4555         * memalloc_no{fs,io}_{save,restore}.
4556         */
4557        alloc_mask = current_gfp_context(gfp_mask);
4558        ac.spread_dirty_pages = false;
4559
4560        /*
4561         * Restore the original nodemask if it was potentially replaced with
4562         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4563         */
4564        if (unlikely(ac.nodemask != nodemask))
4565                ac.nodemask = nodemask;
4566
4567        page = __alloc_pages_slowpath(alloc_mask, order, &ac);
4568
4569out:
4570        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
4571            unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
4572                __free_pages(page, order);
4573                page = NULL;
4574        }
4575
4576        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
4577
4578        return page;
4579}
4580EXPORT_SYMBOL(__alloc_pages_nodemask);
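
/*
 * Usage sketch (illustrative only): most callers do not invoke
 * __alloc_pages_nodemask() directly but go through the alloc_pages() family
 * of wrappers declared in <linux/gfp.h>, which supply the preferred node and
 * nodemask. For example, an order-2 request for four physically contiguous
 * pages:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 * The order passed to __free_pages() must match the one used to allocate.
 */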
4581
4582/*
4583 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4584 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4585 * you need to access high mem.
4586 */
4587unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4588{
4589        struct page *page;
4590
4591        page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4592        if (!page)
4593                return 0;
4594        return (unsigned long) page_address(page);
4595}
4596EXPORT_SYMBOL(__get_free_pages);
4597
4598unsigned long get_zeroed_page(gfp_t gfp_mask)
4599{
4600        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
4601}
4602EXPORT_SYMBOL(get_zeroed_page);
4603
4604static inline void free_the_page(struct page *page, unsigned int order)
4605{
4606        if (order == 0)         /* Via pcp? */
4607                free_unref_page(page);
4608        else
4609                __free_pages_ok(page, order);
4610}
4611
4612void __free_pages(struct page *page, unsigned int order)
4613{
4614        if (put_page_testzero(page))
4615                free_the_page(page, order);
4616}
4617EXPORT_SYMBOL(__free_pages);
4618
4619void free_pages(unsigned long addr, unsigned int order)
4620{
4621        if (addr != 0) {
4622                VM_BUG_ON(!virt_addr_valid((void *)addr));
4623                __free_pages(virt_to_page((void *)addr), order);
4624        }
4625}
4626
4627EXPORT_SYMBOL(free_pages);
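
/*
 * Usage sketch (illustrative only): a typical pairing of the helpers above
 * for a single zeroed, directly addressable page:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 0);
 *
 * The order given to free_pages() must match the allocation order; it is 0
 * here because get_zeroed_page() always allocates a single page.
 */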
4628
4629/*
4630 * Page Fragment:
4631 *  An arbitrary-length arbitrary-offset area of memory which resides
4632 *  within a 0 or higher order page.  Multiple fragments within that page
4633 *  are individually refcounted, in the page's reference counter.
4634 *
4635 * The page_frag functions below provide a simple allocation framework for
4636 * page fragments.  This is used by the network stack and network device
4637 * drivers to provide a backing region of memory for use as either an
4638 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4639 */
4640static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4641                                             gfp_t gfp_mask)
4642{
4643        struct page *page = NULL;
4644        gfp_t gfp = gfp_mask;
4645
4646#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4647        gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4648                    __GFP_NOMEMALLOC;
4649        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4650                                PAGE_FRAG_CACHE_MAX_ORDER);
4651        nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4652#endif
4653        if (unlikely(!page))
4654                page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4655
4656        nc->va = page ? page_address(page) : NULL;
4657
4658        return page;
4659}
4660
4661void __page_frag_cache_drain(struct page *page, unsigned int count)
4662{
4663        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4664
4665        if (page_ref_sub_and_test(page, count))
4666                free_the_page(page, compound_order(page));
4667}
4668EXPORT_SYMBOL(__page_frag_cache_drain);
4669
4670void *page_frag_alloc(struct page_frag_cache *nc,
4671                      unsigned int fragsz, gfp_t gfp_mask)
4672{
4673        unsigned int size = PAGE_SIZE;
4674        struct page *page;
4675        int offset;
4676
4677        if (unlikely(!nc->va)) {
4678refill:
4679                page = __page_frag_cache_refill(nc, gfp_mask);
4680                if (!page)
4681                        return NULL;
4682
4683#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4684                /* if size can vary use size else just use PAGE_SIZE */
4685                size = nc->size;
4686#endif
4687                /* Even if we own the page, we do not use atomic_set().
4688                 * This would break get_page_unless_zero() users.
4689                 */
4690                page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4691
4692                /* reset page count bias and offset to start of new frag */
4693                nc->pfmemalloc = page_is_pfmemalloc(page);
4694                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4695                nc->offset = size;
4696        }
4697
4698        offset = nc->offset - fragsz;
4699        if (unlikely(offset < 0)) {
4700                page = virt_to_page(nc->va);
4701
4702                if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4703                        goto refill;
4704
4705#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4706                /* if size can vary use size else just use PAGE_SIZE */
4707                size = nc->size;
4708#endif
4709                /* OK, page count is 0, we can safely set it */
4710                set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4711
4712                /* reset page count bias and offset to start of new frag */
4713                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4714                offset = size - fragsz;
4715        }
4716
4717        nc->pagecnt_bias--;
4718        nc->offset = offset;
4719
4720        return nc->va + offset;
4721}
4722EXPORT_SYMBOL(page_frag_alloc);
4723
4724/*
4725 * Frees a page fragment allocated out of either a compound or order 0 page.
4726 */
4727void page_frag_free(void *addr)
4728{
4729        struct page *page = virt_to_head_page(addr);
4730
4731        if (unlikely(put_page_testzero(page)))
4732                free_the_page(page, compound_order(page));
4733}
4734EXPORT_SYMBOL(page_frag_free);
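
/*
 * Usage sketch (illustrative only; the cache name is made up): page_frag
 * users keep a page_frag_cache, usually per CPU or per device, and carve
 * small buffers out of it. page_frag_alloc() takes no locks, so the caller
 * is responsible for serializing access to the cache:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	page_frag_free(buf);
 *
 * Each fragment pins the backing page; the page only goes back to the buddy
 * allocator once all fragments have been freed and the pagecnt_bias above
 * has been drained.
 */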
4735
4736static void *make_alloc_exact(unsigned long addr, unsigned int order,
4737                size_t size)
4738{
4739        if (addr) {
4740                unsigned long alloc_end = addr + (PAGE_SIZE << order);
4741                unsigned long used = addr + PAGE_ALIGN(size);
4742
4743                split_page(virt_to_page((void *)addr), order);
4744                while (used < alloc_end) {
4745                        free_page(used);
4746                        used += PAGE_SIZE;
4747                }
4748        }
4749        return (void *)addr;
4750}
4751
4752/**
4753 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4754 * @size: the number of bytes to allocate
4755 * @gfp_mask: GFP flags for the allocation
4756 *
4757 * This function is similar to alloc_pages(), except that it allocates the
4758 * minimum number of pages to satisfy the request.  alloc_pages() can only
4759 * allocate memory in power-of-two pages.
4760 *
4761 * This function is also limited by MAX_ORDER.
4762 *
4763 * Memory allocated by this function must be released by free_pages_exact().
4764 */
4765void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4766{
4767        unsigned int order = get_order(size);
4768        unsigned long addr;
4769
4770        addr = __get_free_pages(gfp_mask, order);
4771        return make_alloc_exact(addr, order, size);
4772}
4773EXPORT_SYMBOL(alloc_pages_exact);
4774
4775/**
4776 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4777 *                         pages on a node.
4778 * @nid: the preferred node ID where memory should be allocated
4779 * @size: the number of bytes to allocate
4780 * @gfp_mask: GFP flags for the allocation
4781 *
4782 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4783 * back.
4784 */
4785void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4786{
4787        unsigned int order = get_order(size);
4788        struct page *p = alloc_pages_node(nid, gfp_mask, order);
4789        if (!p)
4790                return NULL;
4791        return make_alloc_exact((unsigned long)page_address(p), order, size);
4792}
4793
4794/**
4795 * free_pages_exact - release memory allocated via alloc_pages_exact()
4796 * @virt: the value returned by alloc_pages_exact.
4797 * @size: size of allocation, same value as passed to alloc_pages_exact().
4798 *
4799 * Release the memory allocated by a previous call to alloc_pages_exact.
4800 */
4801void free_pages_exact(void *virt, size_t size)
4802{
4803        unsigned long addr = (unsigned long)virt;
4804        unsigned long end = addr + PAGE_ALIGN(size);
4805
4806        while (addr < end) {
4807                free_page(addr);
4808                addr += PAGE_SIZE;
4809        }
4810}
4811EXPORT_SYMBOL(free_pages_exact);
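
/*
 * Usage sketch (illustrative only): alloc_pages_exact() helps when a buffer
 * needs, say, five pages. A plain order-3 allocation would pin eight pages,
 * whereas the exact variant splits the order-3 block and returns the three
 * trailing pages to the free lists:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 *
 * The size passed to free_pages_exact() must match the allocation, since it
 * is what determines how many pages get freed.
 */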
4812
4813/**
4814 * nr_free_zone_pages - count number of pages beyond high watermark
4815 * @offset: The zone index of the highest zone
4816 *
4817 * nr_free_zone_pages() counts the number of pages which are beyond the
4818 * high watermark within all zones at or below a given zone index.  For each
4819 * zone, the number of pages is calculated as:
4820 *
4821 *     nr_free_zone_pages = managed_pages - high_pages
4822 */
4823static unsigned long nr_free_zone_pages(int offset)
4824{
4825        struct zoneref *z;
4826        struct zone *zone;
4827
4828        /* Just pick one node, since fallback list is circular */
4829        unsigned long sum = 0;
4830
4831        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4832
4833        for_each_zone_zonelist(zone, z, zonelist, offset) {
4834                unsigned long size = zone_managed_pages(zone);
4835                unsigned long high = high_wmark_pages(zone);
4836                if (size > high)
4837                        sum += size - high;
4838        }
4839
4840        return sum;
4841}
4842
4843/**
4844 * nr_free_buffer_pages - count number of pages beyond high watermark
4845 *
4846 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4847 * watermark within ZONE_DMA and ZONE_NORMAL.
4848 */
4849unsigned long nr_free_buffer_pages(void)
4850{
4851        return nr_free_zone_pages(gfp_zone(GFP_USER));
4852}
4853EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4854
4855/**
4856 * nr_free_pagecache_pages - count number of pages beyond high watermark
4857 *
4858 * nr_free_pagecache_pages() counts the number of pages which are beyond the
4859 * high watermark within all zones.
4860 */
4861unsigned long nr_free_pagecache_pages(void)
4862{
4863        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
4864}
4865
4866static inline void show_node(struct zone *zone)
4867{
4868        if (IS_ENABLED(CONFIG_NUMA))
4869                printk("Node %d ", zone_to_nid(zone));
4870}
4871
4872long si_mem_available(void)
4873{
4874        long available;
4875        unsigned long pagecache;
4876        unsigned long wmark_low = 0;
4877        unsigned long pages[NR_LRU_LISTS];
4878        unsigned long reclaimable;
4879        struct zone *zone;
4880        int lru;
4881
4882        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
4883                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
4884
4885        for_each_zone(zone)
4886                wmark_low += low_wmark_pages(zone);
4887
4888        /*
4889         * Estimate the amount of memory available for userspace allocations,
4890         * without causing swapping.
4891         */
4892        available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
4893
4894        /*
4895         * Not all the page cache can be freed, otherwise the system will
4896         * start swapping. Assume at least half of the page cache, or the
4897         * low watermark worth of cache, needs to stay.
4898         */
4899        pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
4900        pagecache -= min(pagecache / 2, wmark_low);
4901        available += pagecache;
4902
4903        /*
4904         * Part of the reclaimable slab and other kernel memory consists of
4905         * items that are in use, and cannot be freed. Cap this estimate at the
4906         * low watermark.
4907         */
4908        reclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE) +
4909                        global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
4910        available += reclaimable - min(reclaimable / 2, wmark_low);
4911
4912        if (available < 0)
4913                available = 0;
4914        return available;
4915}
4916EXPORT_SYMBOL_GPL(si_mem_available);
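
/*
 * Worked example (numbers purely illustrative): with 100000 free pages,
 * totalreserve_pages = 20000, 300000 file LRU pages, 50000 reclaimable
 * slab/misc pages and a summed low watermark of 10000 pages, the estimate
 * above evaluates to
 *
 *	available = (100000 - 20000)
 *		  + (300000 - min(150000, 10000))
 *		  + (50000  - min(25000,  10000))
 *		  = 80000 + 290000 + 40000 = 410000 pages
 *
 * i.e. roughly "free memory plus most of the page cache plus most of the
 * reclaimable kernel memory", while leaving the low watermarks untouched.
 */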
4917
4918void si_meminfo(struct sysinfo *val)
4919{
4920        val->totalram = totalram_pages();
4921        val->sharedram = global_node_page_state(NR_SHMEM);
4922        val->freeram = global_zone_page_state(NR_FREE_PAGES);
4923        val->bufferram = nr_blockdev_pages();
4924        val->totalhigh = totalhigh_pages();
4925        val->freehigh = nr_free_highpages();
4926        val->mem_unit = PAGE_SIZE;
4927}
4928
4929EXPORT_SYMBOL(si_meminfo);
4930
4931#ifdef CONFIG_NUMA
4932void si_meminfo_node(struct sysinfo *val, int nid)
4933{
4934        int zone_type;          /* needs to be signed */
4935        unsigned long managed_pages = 0;
4936        unsigned long managed_highpages = 0;
4937        unsigned long free_highpages = 0;
4938        pg_data_t *pgdat = NODE_DATA(nid);
4939
4940        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
4941                managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
4942        val->totalram = managed_pages;
4943        val->sharedram = node_page_state(pgdat, NR_SHMEM);
4944        val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
4945#ifdef CONFIG_HIGHMEM
4946        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
4947                struct zone *zone = &pgdat->node_zones[zone_type];
4948
4949                if (is_highmem(zone)) {
4950                        managed_highpages += zone_managed_pages(zone);
4951                        free_highpages += zone_page_state(zone, NR_FREE_PAGES);
4952                }
4953        }
4954        val->totalhigh = managed_highpages;
4955        val->freehigh = free_highpages;
4956#else
4957        val->totalhigh = managed_highpages;
4958        val->freehigh = free_highpages;
4959#endif
4960        val->mem_unit = PAGE_SIZE;
4961}
4962#endif
4963
4964/*
4965 * Determine whether the node should be displayed or not, depending on whether
4966 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
4967 */
4968static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
4969{
4970        if (!(flags & SHOW_MEM_FILTER_NODES))
4971                return false;
4972
4973        /*
4974         * no node mask - aka implicit memory numa policy. Do not bother with
4975         * the synchronization - read_mems_allowed_begin - because we do not
4976         * have to be precise here.
4977         */
4978        if (!nodemask)
4979                nodemask = &cpuset_current_mems_allowed;
4980
4981        return !node_isset(nid, *nodemask);
4982}
4983
4984#define K(x) ((x) << (PAGE_SHIFT-10))
4985
4986static void show_migration_types(unsigned char type)
4987{
4988        static const char types[MIGRATE_TYPES] = {
4989                [MIGRATE_UNMOVABLE]     = 'U',
4990                [MIGRATE_MOVABLE]       = 'M',
4991                [MIGRATE_RECLAIMABLE]   = 'E',
4992                [MIGRATE_HIGHATOMIC]    = 'H',
4993#ifdef CONFIG_CMA
4994                [MIGRATE_CMA]           = 'C',
4995#endif
4996#ifdef CONFIG_MEMORY_ISOLATION
4997                [MIGRATE_ISOLATE]       = 'I',
4998#endif
4999        };
5000        char tmp[MIGRATE_TYPES + 1];
5001        char *p = tmp;
5002        int i;
5003
5004        for (i = 0; i < MIGRATE_TYPES; i++) {
5005                if (type & (1 << i))
5006                        *p++ = types[i];
5007        }
5008
5009        *p = '\0';
5010        printk(KERN_CONT "(%s) ", tmp);
5011}
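
/*
 * Example (illustrative only): show_free_areas() below builds the type mask
 * from the non-empty free lists of a free_area. A mask of 0x05 has bit 0
 * (MIGRATE_UNMOVABLE) and bit 2 (MIGRATE_RECLAIMABLE) set, so the loop above
 * prints "(UE) "; a mask with only MIGRATE_MOVABLE set prints "(M) ".
 */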
5012
5013/*
5014 * Show free area list (used inside shift_scroll-lock stuff)
5015 * We also calculate the percentage fragmentation. We do this by counting the
5016 * memory on each free list with the exception of the first item on the list.
5017 *
5018 * Bits in @filter:
5019 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5020 *   cpuset.
5021 */
5022void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5023{
5024        unsigned long free_pcp = 0;
5025        int cpu;
5026        struct zone *zone;
5027        pg_data_t *pgdat;
5028
5029        for_each_populated_zone(zone) {
5030                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5031                        continue;
5032
5033                for_each_online_cpu(cpu)
5034                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5035        }
5036
5037        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5038                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5039                " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
5040                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5041                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5042                " free:%lu free_pcp:%lu free_cma:%lu\n",
5043                global_node_page_state(NR_ACTIVE_ANON),
5044                global_node_page_state(NR_INACTIVE_ANON),
5045                global_node_page_state(NR_ISOLATED_ANON),
5046                global_node_page_state(NR_ACTIVE_FILE),
5047                global_node_page_state(NR_INACTIVE_FILE),
5048                global_node_page_state(NR_ISOLATED_FILE),
5049                global_node_page_state(NR_UNEVICTABLE),
5050                global_node_page_state(NR_FILE_DIRTY),
5051                global_node_page_state(NR_WRITEBACK),
5052                global_node_page_state(NR_UNSTABLE_NFS),
5053                global_node_page_state(NR_SLAB_RECLAIMABLE),
5054                global_node_page_state(NR_SLAB_UNRECLAIMABLE),
5055                global_node_page_state(NR_FILE_MAPPED),
5056                global_node_page_state(NR_SHMEM),
5057                global_zone_page_state(NR_PAGETABLE),
5058                global_zone_page_state(NR_BOUNCE),
5059                global_zone_page_state(NR_FREE_PAGES),
5060                free_pcp,
5061                global_zone_page_state(NR_FREE_CMA_PAGES));
5062
5063        for_each_online_pgdat(pgdat) {
5064                if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5065                        continue;
5066
5067                printk("Node %d"
5068                        " active_anon:%lukB"
5069                        " inactive_anon:%lukB"
5070                        " active_file:%lukB"
5071                        " inactive_file:%lukB"
5072                        " unevictable:%lukB"
5073                        " isolated(anon):%lukB"
5074                        " isolated(file):%lukB"
5075                        " mapped:%lukB"
5076                        " dirty:%lukB"
5077                        " writeback:%lukB"
5078                        " shmem:%lukB"
5079#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5080                        " shmem_thp: %lukB"
5081                        " shmem_pmdmapped: %lukB"
5082                        " anon_thp: %lukB"
5083#endif
5084                        " writeback_tmp:%lukB"
5085                        " unstable:%lukB"
5086                        " all_unreclaimable? %s"
5087                        "\n",
5088                        pgdat->node_id,
5089                        K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5090                        K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5091                        K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5092                        K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5093                        K(node_page_state(pgdat, NR_UNEVICTABLE)),
5094                        K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5095                        K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5096                        K(node_page_state(pgdat, NR_FILE_MAPPED)),
5097                        K(node_page_state(pgdat, NR_FILE_DIRTY)),
5098                        K(node_page_state(pgdat, NR_WRITEBACK)),
5099                        K(node_page_state(pgdat, NR_SHMEM)),
5100#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5101                        K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5102                        K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5103                                        * HPAGE_PMD_NR),
5104                        K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5105#endif
5106                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5107                        K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
5108                        pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5109                                "yes" : "no");
5110        }
5111
5112        for_each_populated_zone(zone) {
5113                int i;
5114
5115                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5116                        continue;
5117
5118                free_pcp = 0;
5119                for_each_online_cpu(cpu)
5120                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5121
5122                show_node(zone);
5123                printk(KERN_CONT
5124                        "%s"
5125                        " free:%lukB"
5126                        " min:%lukB"
5127                        " low:%lukB"
5128                        " high:%lukB"
5129                        " active_anon:%lukB"
5130                        " inactive_anon:%lukB"
5131                        " active_file:%lukB"
5132                        " inactive_file:%lukB"
5133                        " unevictable:%lukB"
5134                        " writepending:%lukB"
5135                        " present:%lukB"
5136                        " managed:%lukB"
5137                        " mlocked:%lukB"
5138                        " kernel_stack:%lukB"
5139                        " pagetables:%lukB"
5140                        " bounce:%lukB"
5141                        " free_pcp:%lukB"
5142                        " local_pcp:%ukB"
5143                        " free_cma:%lukB"
5144                        "\n",
5145                        zone->name,
5146                        K(zone_page_state(zone, NR_FREE_PAGES)),
5147                        K(min_wmark_pages(zone)),
5148                        K(low_wmark_pages(zone)),
5149                        K(high_wmark_pages(zone)),
5150                        K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5151                        K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5152                        K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5153                        K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5154                        K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5155                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5156                        K(zone->present_pages),
5157                        K(zone_managed_pages(zone)),
5158                        K(zone_page_state(zone, NR_MLOCK)),
5159                        zone_page_state(zone, NR_KERNEL_STACK_KB),
5160                        K(zone_page_state(zone, NR_PAGETABLE)),
5161                        K(zone_page_state(zone, NR_BOUNCE)),
5162                        K(free_pcp),
5163                        K(this_cpu_read(zone->pageset->pcp.count)),
5164                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5165                printk("lowmem_reserve[]:");
5166                for (i = 0; i < MAX_NR_ZONES; i++)
5167                        printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5168                printk(KERN_CONT "\n");
5169        }
5170
5171        for_each_populated_zone(zone) {
5172                unsigned int order;
5173                unsigned long nr[MAX_ORDER], flags, total = 0;
5174                unsigned char types[MAX_ORDER];
5175
5176                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5177                        continue;
5178                show_node(zone);
5179                printk(KERN_CONT "%s: ", zone->name);
5180
5181                spin_lock_irqsave(&zone->lock, flags);
5182                for (order = 0; order < MAX_ORDER; order++) {
5183                        struct free_area *area = &zone->free_area[order];
5184                        int type;
5185
5186                        nr[order] = area->nr_free;
5187                        total += nr[order] << order;
5188
5189                        types[order] = 0;
5190                        for (type = 0; type < MIGRATE_TYPES; type++) {
5191                                if (!list_empty(&area->free_list[type]))
5192                                        types[order] |= 1 << type;
5193                        }
5194                }
5195                spin_unlock_irqrestore(&zone->lock, flags);
5196                for (order = 0; order < MAX_ORDER; order++) {
5197                        printk(KERN_CONT "%lu*%lukB ",
5198                               nr[order], K(1UL) << order);
5199                        if (nr[order])
5200                                show_migration_types(types[order]);
5201                }
5202                printk(KERN_CONT "= %lukB\n", K(total));
5203        }
5204
5205        hugetlb_show_meminfo();
5206
5207        printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5208
5209        show_swap_cache_info();
5210}
5211
5212static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5213{
5214        zoneref->zone = zone;
5215        zoneref->zone_idx = zone_idx(zone);
5216}
5217
5218/*
5219 * Builds allocation fallback zone lists.
5220 *
5221 * Add all populated zones of a node to the zonelist.
5222 */
5223static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5224{
5225        struct zone *zone;
5226        enum zone_type zone_type = MAX_NR_ZONES;
5227        int nr_zones = 0;
5228
5229        do {
5230                zone_type--;
5231                zone = pgdat->node_zones + zone_type;
5232                if (managed_zone(zone)) {
5233                        zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5234                        check_highest_zone(zone_type);
5235                }
5236        } while (zone_type);
5237
5238        return nr_zones;
5239}
5240
5241#ifdef CONFIG_NUMA
5242
5243static int __parse_numa_zonelist_order(char *s)
5244{
5245        /*
5246         * We used to support different zonelist modes but they turned
5247         * out to be just not useful. Let's keep the warning in place
5248         * if somebody still uses the cmd line parameter so that we do
5249         * not fail it silently.
5250         */
5251        if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5252                pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5253                return -EINVAL;
5254        }
5255        return 0;
5256}
5257
5258static __init int setup_numa_zonelist_order(char *s)
5259{
5260        if (!s)
5261                return 0;
5262
5263        return __parse_numa_zonelist_order(s);
5264}
5265early_param("numa_zonelist_order", setup_numa_zonelist_order);
5266
5267char numa_zonelist_order[] = "Node";
5268
5269/*
5270 * sysctl handler for numa_zonelist_order
5271 */
5272int numa_zonelist_order_handler(struct ctl_table *table, int write,
5273                void __user *buffer, size_t *length,
5274                loff_t *ppos)
5275{
5276        char *str;
5277        int ret;
5278
5279        if (!write)
5280                return proc_dostring(table, write, buffer, length, ppos);
5281        str = memdup_user_nul(buffer, 16);
5282        if (IS_ERR(str))
5283                return PTR_ERR(str);
5284
5285        ret = __parse_numa_zonelist_order(str);
5286        kfree(str);
5287        return ret;
5288}
5289
5290
5291#define MAX_NODE_LOAD (nr_online_nodes)
5292static int node_load[MAX_NUMNODES];
5293
5294/**
5295 * find_next_best_node - find the next node that should appear in a given node's fallback list
5296 * @node: node whose fallback list we're appending
5297 * @used_node_mask: nodemask_t of already used nodes
5298 *
5299 * We use a number of factors to determine which is the next node that should
5300 * appear on a given node's fallback list.  The node should not have appeared
5301 * already in @node's fallback list, and it should be the next closest node
5302 * according to the distance array (which contains arbitrary distance values
5303 * from each node to each node in the system), and should also prefer nodes
5304 * with no CPUs, since presumably they'll have very little allocation pressure
5305 * on them otherwise.
5306 * It returns -1 if no node is found.
5307 */
5308static int find_next_best_node(int node, nodemask_t *used_node_mask)
5309{
5310        int n, val;
5311        int min_val = INT_MAX;
5312        int best_node = NUMA_NO_NODE;
5313        const struct cpumask *tmp = cpumask_of_node(0);
5314
5315        /* Use the local node if we haven't already */
5316        if (!node_isset(node, *used_node_mask)) {
5317                node_set(node, *used_node_mask);
5318                return node;
5319        }
5320
5321        for_each_node_state(n, N_MEMORY) {
5322
5323                /* Don't want a node to appear more than once */
5324                if (node_isset(n, *used_node_mask))
5325                        continue;
5326
5327                /* Use the distance array to find the distance */
5328                val = node_distance(node, n);
5329
5330                /* Penalize nodes under us ("prefer the next node") */
5331                val += (n < node);
5332
5333                /* Give preference to headless and unused nodes */
5334                tmp = cpumask_of_node(n);
5335                if (!cpumask_empty(tmp))
5336                        val += PENALTY_FOR_NODE_WITH_CPUS;
5337
5338                /* Slight preference for less loaded node */
5339                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5340                val += node_load[n];
5341
5342                if (val < min_val) {
5343                        min_val = val;
5344                        best_node = n;
5345                }
5346        }
5347
5348        if (best_node >= 0)
5349                node_set(best_node, *used_node_mask);
5350
5351        return best_node;
5352}
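
/*
 * Worked example (illustrative only, assuming the default
 * PENALTY_FOR_NODE_WITH_CPUS value of 1): suppose node 0 is building its
 * fallback list and nodes 1 and 2 are both at distance 20, but node 1 has
 * CPUs while node 2 is memory-only. Before the node_load scaling the scores
 * are
 *
 *	node 1: 20 + 0 (n > node) + 1 (has CPUs) = 21
 *	node 2: 20 + 0 (n > node) + 0            = 20
 *
 * so the CPU-less node 2 is picked first, matching the preference for
 * headless nodes described above.
 */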
5353
5354
5355/*
5356 * Build zonelists ordered by node and zones within node.
5357 * This results in maximum locality--normal zone overflows into local
5358 * DMA zone, if any--but risks exhausting DMA zone.
5359 */
5360static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5361                unsigned nr_nodes)
5362{
5363        struct zoneref *zonerefs;
5364        int i;
5365
5366        zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5367
5368        for (i = 0; i < nr_nodes; i++) {
5369                int nr_zones;
5370
5371                pg_data_t *node = NODE_DATA(node_order[i]);
5372
5373                nr_zones = build_zonerefs_node(node, zonerefs);
5374                zonerefs += nr_zones;
5375        }
5376        zonerefs->zone = NULL;
5377        zonerefs->zone_idx = 0;
5378}
5379
5380/*
5381 * Build gfp_thisnode zonelists
5382 */
5383static void build_thisnode_zonelists(pg_data_t *pgdat)
5384{
5385        struct zoneref *zonerefs;
5386        int nr_zones;
5387
5388        zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5389        nr_zones = build_zonerefs_node(pgdat, zonerefs);
5390        zonerefs += nr_zones;
5391        zonerefs->zone = NULL;
5392        zonerefs->zone_idx = 0;
5393}
5394
5395/*
5396 * Build zonelists ordered by zone and nodes within zones.
5397 * This results in conserving DMA zone[s] until all Normal memory is
5398 * exhausted, but results in overflowing to remote node while memory
5399 * may still exist in local DMA zone.
5400 */
5401
5402static void build_zonelists(pg_data_t *pgdat)
5403{
5404        static int node_order[MAX_NUMNODES];
5405        int node, load, nr_nodes = 0;
5406        nodemask_t used_mask;
5407        int local_node, prev_node;
5408
5409        /* NUMA-aware ordering of nodes */
5410        local_node = pgdat->node_id;
5411        load = nr_online_nodes;
5412        prev_node = local_node;
5413        nodes_clear(used_mask);
5414
5415        memset(node_order, 0, sizeof(node_order));
5416        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5417                /*
5418                 * We don't want to pressure a particular node.
5419                 * So add a penalty to the first node in the same
5420                 * distance group in order to make it round-robin.
5421                 */
5422                if (node_distance(local_node, node) !=
5423                    node_distance(local_node, prev_node))
5424                        node_load[node] = load;
5425
5426                node_order[nr_nodes++] = node;
5427                prev_node = node;
5428                load--;
5429        }
5430
5431        build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5432        build_thisnode_zonelists(pgdat);
5433}
5434
5435#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5436/*
5437 * Return node id of node used for "local" allocations.
5438 * I.e., first node id of first zone in arg node's generic zonelist.
5439 * Used for initializing percpu 'numa_mem', which is used primarily
5440 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5441 */
5442int local_memory_node(int node)
5443{
5444        struct zoneref *z;
5445
5446        z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5447                                   gfp_zone(GFP_KERNEL),
5448                                   NULL);
5449        return zone_to_nid(z->zone);
5450}
5451#endif
5452
5453static void setup_min_unmapped_ratio(void);
5454static void setup_min_slab_ratio(void);
5455#else   /* CONFIG_NUMA */
5456
5457static void build_zonelists(pg_data_t *pgdat)
5458{
5459        int node, local_node;
5460        struct zoneref *zonerefs;
5461        int nr_zones;
5462
5463        local_node = pgdat->node_id;
5464
5465        zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5466        nr_zones = build_zonerefs_node(pgdat, zonerefs);
5467        zonerefs += nr_zones;
5468
5469        /*
5470         * Now we build the zonelist so that it contains the zones
5471         * of all the other nodes.
5472         * We don't want to pressure a particular node, so when
5473         * building the zones for node N, we make sure that the
5474         * zones coming right after the local ones are those from
5475         * node N+1 (modulo N)
5476         */
5477        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5478                if (!node_online(node))
5479                        continue;
5480                nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5481                zonerefs += nr_zones;
5482        }
5483        for (node = 0; node < local_node; node++) {
5484                if (!node_online(node))
5485                        continue;
5486                nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5487                zonerefs += nr_zones;
5488        }
5489
5490        zonerefs->zone = NULL;
5491        zonerefs->zone_idx = 0;
5492}
5493
5494#endif  /* CONFIG_NUMA */
5495
5496/*
5497 * Boot pageset table. One per cpu which is going to be used for all
5498 * zones and all nodes. The parameters will be set in such a way
5499 * that an item put on a list will immediately be handed over to
5500 * the buddy list. This is safe since pageset manipulation is done
5501 * with interrupts disabled.
5502 *
5503 * The boot_pagesets must be kept even after bootup is complete for
5504 * unused processors and/or zones. They do play a role for bootstrapping
5505 * hotplugged processors.
5506 *
5507 * zoneinfo_show() and maybe other functions do
5508 * not check if the processor is online before following the pageset pointer.
5509 * Other parts of the kernel may not check if the zone is available.
5510 */
5511static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5512static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5513static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5514
5515static void __build_all_zonelists(void *data)
5516{
5517        int nid;
5518        int __maybe_unused cpu;
5519        pg_data_t *self = data;
5520        static DEFINE_SPINLOCK(lock);
5521
5522        spin_lock(&lock);
5523
5524#ifdef CONFIG_NUMA
5525        memset(node_load, 0, sizeof(node_load));
5526#endif
5527
5528        /*
5529         * This node is hotadded and no memory is yet present.   So just
5530         * building zonelists is fine - no need to touch other nodes.
5531         */
5532        if (self && !node_online(self->node_id)) {
5533                build_zonelists(self);
5534        } else {
5535                for_each_online_node(nid) {
5536                        pg_data_t *pgdat = NODE_DATA(nid);
5537
5538                        build_zonelists(pgdat);
5539                }
5540
5541#ifdef CONFIG_HAVE_MEMORYLESS_NODES
5542                /*
5543                 * We now know the "local memory node" for each node--
5544                 * i.e., the node of the first zone in the generic zonelist.
5545                 * Set up numa_mem percpu variable for on-line cpus.  During
5546                 * boot, only the boot cpu should be on-line;  we'll init the
5547                 * secondary cpus' numa_mem as they come on-line.  During
5548                 * node/memory hotplug, we'll fixup all on-line cpus.
5549                 */
5550                for_each_online_cpu(cpu)
5551                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5552#endif
5553        }
5554
5555        spin_unlock(&lock);
5556}
5557
5558static noinline void __init
5559build_all_zonelists_init(void)
5560{
5561        int cpu;
5562
5563        __build_all_zonelists(NULL);
5564
5565        /*
5566         * Initialize the boot_pagesets that are going to be used
5567         * for bootstrapping processors. The real pagesets for
5568         * each zone will be allocated later when the per cpu
5569         * allocator is available.
5570         *
5571         * boot_pagesets are used also for bootstrapping offline
5572         * cpus if the system is already booted because the pagesets
5573         * are needed to initialize allocators on a specific cpu too.
5574         * F.e. the percpu allocator needs the page allocator which
5575         * needs the percpu allocator in order to allocate its pagesets
5576         * (a chicken-egg dilemma).
5577         */
5578        for_each_possible_cpu(cpu)
5579                setup_pageset(&per_cpu(boot_pageset, cpu), 0);
5580
5581        mminit_verify_zonelist();
5582        cpuset_init_current_mems_allowed();
5583}
5584
5585/*
5586 * The __init helper below may only be called while system_state == SYSTEM_BOOTING.
5587 *
5588 * __ref due to call of __init annotated helper build_all_zonelists_init
5589 * [protected by SYSTEM_BOOTING].
5590 */
5591void __ref build_all_zonelists(pg_data_t *pgdat)
5592{
5593        if (system_state == SYSTEM_BOOTING) {
5594                build_all_zonelists_init();
5595        } else {
5596                __build_all_zonelists(pgdat);
5597                /* cpuset refresh routine should be here */
5598        }
5599        vm_total_pages = nr_free_pagecache_pages();
5600        /*
5601         * Disable grouping by mobility if the number of pages in the
5602         * system is too low to allow the mechanism to work. It would be
5603         * more accurate, but expensive to check per-zone. This check is
5604         * made on memory-hotadd so a system can start with mobility
5605         * disabled and enable it later
5606         */
5607        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5608                page_group_by_mobility_disabled = 1;
5609        else
5610                page_group_by_mobility_disabled = 0;
5611
5612        pr_info("Built %i zonelists, mobility grouping %s.  Total pages: %ld\n",
5613                nr_online_nodes,
5614                page_group_by_mobility_disabled ? "off" : "on",
5615                vm_total_pages);
5616#ifdef CONFIG_NUMA
5617        pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5618#endif
5619}
5620
5621/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
5622static bool __meminit
5623overlap_memmap_init(unsigned long zone, unsigned long *pfn)
5624{
5625#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5626        static struct memblock_region *r;
5627
5628        if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
5629                if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
5630                        for_each_memblock(memory, r) {
5631                                if (*pfn < memblock_region_memory_end_pfn(r))
5632                                        break;
5633                        }
5634                }
5635                if (*pfn >= memblock_region_memory_base_pfn(r) &&
5636                    memblock_is_mirror(r)) {
5637                        *pfn = memblock_region_memory_end_pfn(r);
5638                        return true;
5639                }
5640        }
5641#endif
5642        return false;
5643}
5644
5645/*
5646 * Initially all pages are reserved - free ones are freed
5647 * up by memblock_free_all() once the early boot process is
5648 * done. Non-atomic initialization, single-pass.
5649 */
5650void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5651                unsigned long start_pfn, enum memmap_context context,
5652                struct vmem_altmap *altmap)
5653{
5654        unsigned long pfn, end_pfn = start_pfn + size;
5655        struct page *page;
5656
5657        if (highest_memmap_pfn < end_pfn - 1)
5658                highest_memmap_pfn = end_pfn - 1;
5659
5660#ifdef CONFIG_ZONE_DEVICE
5661        /*
5662         * Honor reservation requested by the driver for this ZONE_DEVICE
5663         * memory. We limit the total number of pages to initialize to just
5664         * those that might contain the memory mapping. We will defer the
5665         * ZONE_DEVICE page initialization until after we have released
5666         * the hotplug lock.
5667         */
5668        if (zone == ZONE_DEVICE) {
5669                if (!altmap)
5670                        return;
5671
5672                if (start_pfn == altmap->base_pfn)
5673                        start_pfn += altmap->reserve;
5674                end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5675        }
5676#endif
5677
5678        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5679                /*
5680                 * There can be holes in boot-time mem_map[]s handed to this
5681                 * function.  They do not exist on hotplugged memory.
5682                 */
5683                if (context == MEMMAP_EARLY) {
5684                        if (!early_pfn_valid(pfn))
5685                                continue;
5686                        if (!early_pfn_in_nid(pfn, nid))
5687                                continue;
5688                        if (overlap_memmap_init(zone, &pfn))
5689                                continue;
5690                        if (defer_init(nid, pfn, end_pfn))
5691                                break;
5692                }
5693
5694                page = pfn_to_page(pfn);
5695                __init_single_page(page, pfn, zone, nid);
5696                if (context == MEMMAP_HOTPLUG)
5697                        __SetPageReserved(page);
5698
5699                /*
5700                 * Mark the block movable so that blocks are reserved for
5701                 * movable at startup. This will force kernel allocations
5702                 * to reserve their blocks rather than leaking throughout
5703                 * the address space during boot when many long-lived
5704                 * kernel allocations are made.
5705                 *
5706                 * The bitmap is created for the zone's valid pfn range, but the
5707                 * memmap can be created for invalid pages (for alignment), so
5708                 * check here not to call set_pageblock_migratetype() against a
5709                 * pfn out of the zone.
5710                 */
5711                if (!(pfn & (pageblock_nr_pages - 1))) {
5712                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5713                        cond_resched();
5714                }
5715        }
5716}
5717
5718#ifdef CONFIG_ZONE_DEVICE
5719void __ref memmap_init_zone_device(struct zone *zone,
5720                                   unsigned long start_pfn,
5721                                   unsigned long size,
5722                                   struct dev_pagemap *pgmap)
5723{
5724        unsigned long pfn, end_pfn = start_pfn + size;
5725        struct pglist_data *pgdat = zone->zone_pgdat;
5726        unsigned long zone_idx = zone_idx(zone);
5727        unsigned long start = jiffies;
5728        int nid = pgdat->node_id;
5729
5730        if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone)))
5731                return;
5732
5733        /*
5734         * The call to memmap_init_zone should have already taken care
5735         * of the pages reserved for the memmap, so we can just jump to
5736         * the end of that region and start processing the device pages.
5737         */
5738        if (pgmap->altmap_valid) {
5739                struct vmem_altmap *altmap = &pgmap->altmap;
5740
5741                start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5742                size = end_pfn - start_pfn;
5743        }
5744
5745        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
5746                struct page *page = pfn_to_page(pfn);
5747
5748                __init_single_page(page, pfn, zone_idx, nid);
5749
5750                /*
5751                 * Mark page reserved as it will need to wait for onlining
5752                 * phase for it to be fully associated with a zone.
5753                 *
5754                 * We can use the non-atomic __set_bit operation for setting
5755                 * the flag as we are still initializing the pages.
5756                 */
5757                __SetPageReserved(page);
5758
5759                /*
5760                 * ZONE_DEVICE pages union ->lru with a ->pgmap back
5761                 * pointer and hmm_data.  It is a bug if a ZONE_DEVICE
5762                 * page is ever freed or placed on a driver-private list.
5763                 */
5764                page->pgmap = pgmap;
5765                page->hmm_data = 0;
5766
5767                /*
5768                 * Mark the block movable so that blocks are reserved for
5769                 * movable at startup. This will force kernel allocations
5770                 * to reserve their blocks rather than leaking throughout
5771                 * the address space during boot when many long-lived
5772                 * kernel allocations are made.
5773                 *
5774                 * The bitmap is created for the zone's valid pfn range, but the
5775                 * memmap can be created for invalid pages (for alignment), so
5776                 * check here not to call set_pageblock_migratetype() against a
5777                 * pfn out of the zone.
5778                 *
5779                 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
5780                 * because this is done early in sparse_add_one_section
5781                 */
5782                if (!(pfn & (pageblock_nr_pages - 1))) {
5783                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5784                        cond_resched();
5785                }
5786        }
5787
5788        pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
5789                size, jiffies_to_msecs(jiffies - start));
5790}
5791
5792#endif
5793static void __meminit zone_init_free_lists(struct zone *zone)
5794{
5795        unsigned int order, t;
5796        for_each_migratetype_order(order, t) {
5797                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
5798                zone->free_area[order].nr_free = 0;
5799        }
5800}
5801
5802void __meminit __weak memmap_init(unsigned long size, int nid,
5803                                  unsigned long zone, unsigned long start_pfn)
5804{
5805        memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
5806}
5807
5808static int zone_batchsize(struct zone *zone)
5809{
5810#ifdef CONFIG_MMU
5811        int batch;
5812
5813        /*
5814         * The per-cpu-pages pools are set to around 1/1000th of the
5815         * size of the zone.
5816         */
5817        batch = zone_managed_pages(zone) / 1024;
5818        /* But no more than a meg. */
5819        if (batch * PAGE_SIZE > 1024 * 1024)
5820                batch = (1024 * 1024) / PAGE_SIZE;
5821        batch /= 4;             /* We effectively *= 4 below */
5822        if (batch < 1)
5823                batch = 1;
5824
5825        /*
5826         * Clamp the batch to a 2^n - 1 value. Having a power
5827         * of 2 value was found to be more likely to have
5828         * suboptimal cache aliasing properties in some cases.
5829         *
5830         * For example if 2 tasks are alternately allocating
5831         * batches of pages, one task can end up with a lot
5832         * of pages of one half of the possible page colors
5833         * and the other with pages of the other colors.
5834         */
5835        batch = rounddown_pow_of_two(batch + batch/2) - 1;
5836
5837        return batch;
5838
5839#else
5840        /* The deferral and batching of frees should be suppressed under NOMMU
5841         * conditions.
5842         *
5843         * The problem is that NOMMU needs to be able to allocate large chunks
5844         * of contiguous memory as there's no hardware page translation to
5845         * assemble apparent contiguous memory from discontiguous pages.
5846         *
5847         * Queueing large contiguous runs of pages for batching, however,
5848         * causes the pages to actually be freed in smaller chunks.  As there
5849         * can be a significant delay between the individual batches being
5850         * recycled, this leads to the once large chunks of space being
5851         * fragmented and becoming unavailable for high-order allocations.
5852         */
5853        return 0;
5854#endif
5855}
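/*
 * Illustrative example of the calculation above, assuming CONFIG_MMU and
 * 4KiB pages: a zone with 1GiB managed (262144 pages) gives
 * 262144 / 1024 = 256, which is exactly 1MiB so the cap leaves it alone,
 * then 256 / 4 = 64, and rounddown_pow_of_two(64 + 32) - 1 = 63 is the
 * resulting per-cpu batch size.
 */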
5856
5857/*
5858 * pcp->high and pcp->batch values are related and dependent on one another:
5859 * ->batch must never be higher than ->high.
5860 * The following function updates them in a safe manner without read side
5861 * locking.
5862 *
5863 * Any new users of pcp->batch and pcp->high should ensure they can cope with
5864 * those fields changing asynchronously (according to the above rule).
5865 *
5866 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5867 * outside of boot time (or some other assurance that no concurrent updaters
5868 * exist).
5869 */
5870static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5871                unsigned long batch)
5872{
5873        /* Start with a fail-safe value for batch */
5874        pcp->batch = 1;
5875        smp_wmb();
5876
5877        /* Update high, then batch, in order */
5878        pcp->high = high;
5879        smp_wmb();
5880
5881        pcp->batch = batch;
5882}
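/*
 * A rough sketch of why the store order above matters: ->batch is parked at
 * 1 before ->high is changed, so a reader that loads ->high before ->batch
 * (with matching read ordering) never computes with a batch larger than the
 * high value it observed. Readers must still tolerate the pair changing
 * asynchronously, as the comment above notes.
 */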
5883
5884/* a companion to pageset_set_high() */
5885static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
5886{
5887        pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
5888}
5889
5890static void pageset_init(struct per_cpu_pageset *p)
5891{
5892        struct per_cpu_pages *pcp;
5893        int migratetype;
5894
5895        memset(p, 0, sizeof(*p));
5896
5897        pcp = &p->pcp;
5898        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
5899                INIT_LIST_HEAD(&pcp->lists[migratetype]);
5900}
5901
5902static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
5903{
5904        pageset_init(p);
5905        pageset_set_batch(p, batch);
5906}
5907
5908/*
5909 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
5910 * to the value high for the pageset p.
5911 */
5912static void pageset_set_high(struct per_cpu_pageset *p,
5913                                unsigned long high)
5914{
5915        unsigned long batch = max(1UL, high / 4);
5916        if ((high / 4) > (PAGE_SHIFT * 8))
5917                batch = PAGE_SHIFT * 8;
5918
5919        pageset_update(&p->pcp, high, batch);
5920}
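/*
 * Illustrative example, assuming 4KiB pages (PAGE_SHIFT == 12): a requested
 * high of 1000 pages gives high / 4 = 250, which exceeds the
 * PAGE_SHIFT * 8 == 96 cap, so the pageset ends up with high = 1000 and
 * batch = 96.
 */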
5921
5922static void pageset_set_high_and_batch(struct zone *zone,
5923                                       struct per_cpu_pageset *pcp)
5924{
5925        if (percpu_pagelist_fraction)
5926                pageset_set_high(pcp,
5927                        (zone_managed_pages(zone) /
5928                                percpu_pagelist_fraction));
5929        else
5930                pageset_set_batch(pcp, zone_batchsize(zone));
5931}
5932
5933static void __meminit zone_pageset_init(struct zone *zone, int cpu)
5934{
5935        struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
5936
5937        pageset_init(pcp);
5938        pageset_set_high_and_batch(zone, pcp);
5939}
5940
5941void __meminit setup_zone_pageset(struct zone *zone)
5942{
5943        int cpu;
5944        zone->pageset = alloc_percpu(struct per_cpu_pageset);
5945        for_each_possible_cpu(cpu)
5946                zone_pageset_init(zone, cpu);
5947}
5948
5949/*
5950 * Allocate per cpu pagesets and initialize them.
5951 * Before this call only boot pagesets were available.
5952 */
5953void __init setup_per_cpu_pageset(void)
5954{
5955        struct pglist_data *pgdat;
5956        struct zone *zone;
5957
5958        for_each_populated_zone(zone)
5959                setup_zone_pageset(zone);
5960
5961        for_each_online_pgdat(pgdat)
5962                pgdat->per_cpu_nodestats =
5963                        alloc_percpu(struct per_cpu_nodestat);
5964}
5965
5966static __meminit void zone_pcp_init(struct zone *zone)
5967{
5968        /*
5969         * per cpu subsystem is not up at this point. The following code
5970         * relies on the ability of the linker to provide the
5971         * offset of a (static) per cpu variable into the per cpu area.
5972         */
5973        zone->pageset = &boot_pageset;
5974
5975        if (populated_zone(zone))
5976                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
5977                        zone->name, zone->present_pages,
5978                                         zone_batchsize(zone));
5979}
5980
5981void __meminit init_currently_empty_zone(struct zone *zone,
5982                                        unsigned long zone_start_pfn,
5983                                        unsigned long size)
5984{
5985        struct pglist_data *pgdat = zone->zone_pgdat;
5986        int zone_idx = zone_idx(zone) + 1;
5987
5988        if (zone_idx > pgdat->nr_zones)
5989                pgdat->nr_zones = zone_idx;
5990
5991        zone->zone_start_pfn = zone_start_pfn;
5992
5993        mminit_dprintk(MMINIT_TRACE, "memmap_init",
5994                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
5995                        pgdat->node_id,
5996                        (unsigned long)zone_idx(zone),
5997                        zone_start_pfn, (zone_start_pfn + size));
5998
5999        zone_init_free_lists(zone);
6000        zone->initialized = 1;
6001}
6002
6003#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6004#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
6005
6006/*
6007 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
6008 */
6009int __meminit __early_pfn_to_nid(unsigned long pfn,
6010                                        struct mminit_pfnnid_cache *state)
6011{
6012        unsigned long start_pfn, end_pfn;
6013        int nid;
6014
6015        if (state->last_start <= pfn && pfn < state->last_end)
6016                return state->last_nid;
6017
6018        nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
6019        if (nid != -1) {
6020                state->last_start = start_pfn;
6021                state->last_end = end_pfn;
6022                state->last_nid = nid;
6023        }
6024
6025        return nid;
6026}
6027#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
6028
6029/**
6030 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
6031 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
6032 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
6033 *
6034 * If an architecture guarantees that all ranges registered contain no holes
6035 * and may be freed, this function may be used instead of calling
6036 * memblock_free_early_nid() manually.
6037 */
6038void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
6039{
6040        unsigned long start_pfn, end_pfn;
6041        int i, this_nid;
6042
6043        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
6044                start_pfn = min(start_pfn, max_low_pfn);
6045                end_pfn = min(end_pfn, max_low_pfn);
6046
6047                if (start_pfn < end_pfn)
6048                        memblock_free_early_nid(PFN_PHYS(start_pfn),
6049                                        (end_pfn - start_pfn) << PAGE_SHIFT,
6050                                        this_nid);
6051        }
6052}
6053
6054/**
6055 * sparse_memory_present_with_active_regions - Call memory_present for each active range
6056 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
6057 *
6058 * If an architecture guarantees that all ranges registered contain no holes and may
6059 * be freed, this function may be used instead of calling memory_present() manually.
6060 */
6061void __init sparse_memory_present_with_active_regions(int nid)
6062{
6063        unsigned long start_pfn, end_pfn;
6064        int i, this_nid;
6065
6066        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
6067                memory_present(this_nid, start_pfn, end_pfn);
6068}
6069
6070/**
6071 * get_pfn_range_for_nid - Return the start and end page frames for a node
6072 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6073 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6074 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6075 *
6076 * It returns the start and end page frame of a node based on information
6077 * provided by memblock_set_node(). If called for a node
6078 * with no available memory, a warning is printed and the start and end
6079 * PFNs will be 0.
6080 */
6081void __init get_pfn_range_for_nid(unsigned int nid,
6082                        unsigned long *start_pfn, unsigned long *end_pfn)
6083{
6084        unsigned long this_start_pfn, this_end_pfn;
6085        int i;
6086
6087        *start_pfn = -1UL;
6088        *end_pfn = 0;
6089
6090        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6091                *start_pfn = min(*start_pfn, this_start_pfn);
6092                *end_pfn = max(*end_pfn, this_end_pfn);
6093        }
6094
6095        if (*start_pfn == -1UL)
6096                *start_pfn = 0;
6097}
6098
6099/*
6100 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6101 * assumption is made that zones within a node are ordered by monotonically
6102 * increasing memory addresses, so the "highest" populated zone is used.
6103 */
6104static void __init find_usable_zone_for_movable(void)
6105{
6106        int zone_index;
6107        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6108                if (zone_index == ZONE_MOVABLE)
6109                        continue;
6110
6111                if (arch_zone_highest_possible_pfn[zone_index] >
6112                                arch_zone_lowest_possible_pfn[zone_index])
6113                        break;
6114        }
6115
6116        VM_BUG_ON(zone_index == -1);
6117        movable_zone = zone_index;
6118}
6119
6120/*
6121 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6122 * because it is sized independent of architecture. Unlike the other zones,
6123 * the starting point for ZONE_MOVABLE is not fixed. It may be different
6124 * in each node depending on the size of each node and how evenly kernelcore
6125 * is distributed. This helper function adjusts the zone ranges
6126 * provided by the architecture for a given node by using the end of the
6127 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6128 * zones within a node are ordered by monotonically increasing memory addresses.
6129 */
6130static void __init adjust_zone_range_for_zone_movable(int nid,
6131                                        unsigned long zone_type,
6132                                        unsigned long node_start_pfn,
6133                                        unsigned long node_end_pfn,
6134                                        unsigned long *zone_start_pfn,
6135                                        unsigned long *zone_end_pfn)
6136{
6137        /* Only adjust if ZONE_MOVABLE is on this node */
6138        if (zone_movable_pfn[nid]) {
6139                /* Size ZONE_MOVABLE */
6140                if (zone_type == ZONE_MOVABLE) {
6141                        *zone_start_pfn = zone_movable_pfn[nid];
6142                        *zone_end_pfn = min(node_end_pfn,
6143                                arch_zone_highest_possible_pfn[movable_zone]);
6144
6145                /* Adjust for ZONE_MOVABLE starting within this range */
6146                } else if (!mirrored_kernelcore &&
6147                        *zone_start_pfn < zone_movable_pfn[nid] &&
6148                        *zone_end_pfn > zone_movable_pfn[nid]) {
6149                        *zone_end_pfn = zone_movable_pfn[nid];
6150
6151                /* Check if this whole range is within ZONE_MOVABLE */
6152                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6153                        *zone_start_pfn = *zone_end_pfn;
6154        }
6155}
6156
6157/*
6158 * Return the number of pages a zone spans in a node, including holes
6159 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6160 */
6161static unsigned long __init zone_spanned_pages_in_node(int nid,
6162                                        unsigned long zone_type,
6163                                        unsigned long node_start_pfn,
6164                                        unsigned long node_end_pfn,
6165                                        unsigned long *zone_start_pfn,
6166                                        unsigned long *zone_end_pfn,
6167                                        unsigned long *ignored)
6168{
6169        /* When hot-adding a new node from cpu_up(), the node should be empty */
6170        if (!node_start_pfn && !node_end_pfn)
6171                return 0;
6172
6173        /* Get the start and end of the zone */
6174        *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
6175        *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
6176        adjust_zone_range_for_zone_movable(nid, zone_type,
6177                                node_start_pfn, node_end_pfn,
6178                                zone_start_pfn, zone_end_pfn);
6179
6180        /* Check that this node has pages within the zone's required range */
6181        if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6182                return 0;
6183
6184        /* Move the zone boundaries inside the node if necessary */
6185        *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6186        *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6187
6188        /* Return the spanned pages */
6189        return *zone_end_pfn - *zone_start_pfn;
6190}
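/*
 * Illustrative example (ignoring the ZONE_MOVABLE adjustment): if ZONE_DMA32
 * covers pfns [0x1000, 0x100000) and a node spans [0x80000, 0x180000), the
 * clamping above yields a zone range of [0x80000, 0x100000), i.e. 0x80000
 * spanned pages; a node starting at or above 0x100000 would get 0 for this
 * zone.
 */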
6191
6192/*
6193 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6194 * then all holes in the requested range will be accounted for.
6195 */
6196unsigned long __init __absent_pages_in_range(int nid,
6197                                unsigned long range_start_pfn,
6198                                unsigned long range_end_pfn)
6199{
6200        unsigned long nr_absent = range_end_pfn - range_start_pfn;
6201        unsigned long start_pfn, end_pfn;
6202        int i;
6203
6204        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6205                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6206                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6207                nr_absent -= end_pfn - start_pfn;
6208        }
6209        return nr_absent;
6210}
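/*
 * Illustrative example: for a range of [0x0, 0x1000) with registered memory
 * at [0x0, 0x800) and [0xa00, 0x1000), the loop above subtracts 0x800 and
 * 0x600 pages, leaving nr_absent = 0x200, which is exactly the hole at
 * [0x800, 0xa00).
 */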
6211
6212/**
6213 * absent_pages_in_range - Return number of page frames in holes within a range
6214 * @start_pfn: The start PFN to start searching for holes
6215 * @end_pfn: The end PFN to stop searching for holes
6216 *
6217 * It returns the number of page frames in memory holes within a range.
6218 */
6219unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6220                                                        unsigned long end_pfn)
6221{
6222        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6223}
6224
6225/* Return the number of page frames in holes in a zone on a node */
6226static unsigned long __init zone_absent_pages_in_node(int nid,
6227                                        unsigned long zone_type,
6228                                        unsigned long node_start_pfn,
6229                                        unsigned long node_end_pfn,
6230                                        unsigned long *ignored)
6231{
6232        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6233        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6234        unsigned long zone_start_pfn, zone_end_pfn;
6235        unsigned long nr_absent;
6236
6237        /* When hot-adding a new node from cpu_up(), the node should be empty */
6238        if (!node_start_pfn && !node_end_pfn)
6239                return 0;
6240
6241        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6242        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6243
6244        adjust_zone_range_for_zone_movable(nid, zone_type,
6245                        node_start_pfn, node_end_pfn,
6246                        &zone_start_pfn, &zone_end_pfn);
6247        nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6248
6249        /*
6250         * ZONE_MOVABLE handling.
6251         * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6252         * and vice versa.
6253         */
6254        if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6255                unsigned long start_pfn, end_pfn;
6256                struct memblock_region *r;
6257
6258                for_each_memblock(memory, r) {
6259                        start_pfn = clamp(memblock_region_memory_base_pfn(r),
6260                                          zone_start_pfn, zone_end_pfn);
6261                        end_pfn = clamp(memblock_region_memory_end_pfn(r),
6262                                        zone_start_pfn, zone_end_pfn);
6263
6264                        if (zone_type == ZONE_MOVABLE &&
6265                            memblock_is_mirror(r))
6266                                nr_absent += end_pfn - start_pfn;
6267
6268                        if (zone_type == ZONE_NORMAL &&
6269                            !memblock_is_mirror(r))
6270                                nr_absent += end_pfn - start_pfn;
6271                }
6272        }
6273
6274        return nr_absent;
6275}
6276
6277#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6278static inline unsigned long __init zone_spanned_pages_in_node(int nid,
6279                                        unsigned long zone_type,
6280                                        unsigned long node_start_pfn,
6281                                        unsigned long node_end_pfn,
6282                                        unsigned long *zone_start_pfn,
6283                                        unsigned long *zone_end_pfn,
6284                                        unsigned long *zones_size)
6285{
6286        unsigned int zone;
6287
6288        *zone_start_pfn = node_start_pfn;
6289        for (zone = 0; zone < zone_type; zone++)
6290                *zone_start_pfn += zones_size[zone];
6291
6292        *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
6293
6294        return zones_size[zone_type];
6295}
6296
6297static inline unsigned long __init zone_absent_pages_in_node(int nid,
6298                                                unsigned long zone_type,
6299                                                unsigned long node_start_pfn,
6300                                                unsigned long node_end_pfn,
6301                                                unsigned long *zholes_size)
6302{
6303        if (!zholes_size)
6304                return 0;
6305
6306        return zholes_size[zone_type];
6307}
6308
6309#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6310
6311static void __init calculate_node_totalpages(struct pglist_data *pgdat,
6312                                                unsigned long node_start_pfn,
6313                                                unsigned long node_end_pfn,
6314                                                unsigned long *zones_size,
6315                                                unsigned long *zholes_size)
6316{
6317        unsigned long realtotalpages = 0, totalpages = 0;
6318        enum zone_type i;
6319
6320        for (i = 0; i < MAX_NR_ZONES; i++) {
6321                struct zone *zone = pgdat->node_zones + i;
6322                unsigned long zone_start_pfn, zone_end_pfn;
6323                unsigned long size, real_size;
6324
6325                size = zone_spanned_pages_in_node(pgdat->node_id, i,
6326                                                  node_start_pfn,
6327                                                  node_end_pfn,
6328                                                  &zone_start_pfn,
6329                                                  &zone_end_pfn,
6330                                                  zones_size);
6331                real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
6332                                                  node_start_pfn, node_end_pfn,
6333                                                  zholes_size);
6334                if (size)
6335                        zone->zone_start_pfn = zone_start_pfn;
6336                else
6337                        zone->zone_start_pfn = 0;
6338                zone->spanned_pages = size;
6339                zone->present_pages = real_size;
6340
6341                totalpages += size;
6342                realtotalpages += real_size;
6343        }
6344
6345        pgdat->node_spanned_pages = totalpages;
6346        pgdat->node_present_pages = realtotalpages;
6347        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6348                                                        realtotalpages);
6349}
6350
6351#ifndef CONFIG_SPARSEMEM
6352/*
6353 * Calculate the size of the zone->pageblock_flags bitmap, rounded up to an
6354 * unsigned long. Start by making sure zonesize is a multiple of
6355 * pageblock_nr_pages by rounding up, then use NR_PAGEBLOCK_BITS worth of
6356 * bits per pageblock, round what is now in bits up to the nearest long in
6357 * bits, and return it in bytes.
6358 */
6359static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6360{
6361        unsigned long usemapsize;
6362
6363        zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6364        usemapsize = roundup(zonesize, pageblock_nr_pages);
6365        usemapsize = usemapsize >> pageblock_order;
6366        usemapsize *= NR_PAGEBLOCK_BITS;
6367        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6368
6369        return usemapsize / 8;
6370}
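/*
 * Illustrative example, assuming pageblock_order == 9 (512-page pageblocks)
 * and NR_PAGEBLOCK_BITS == 4: a pageblock-aligned zone of 262144 pages
 * (1GiB with 4KiB pages) covers 512 pageblocks and needs 2048 bits; rounding
 * up to a multiple of BITS_PER_LONG leaves that at 2048 bits, so
 * usemap_size() returns 256 bytes.
 */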
6371
6372static void __ref setup_usemap(struct pglist_data *pgdat,
6373                                struct zone *zone,
6374                                unsigned long zone_start_pfn,
6375                                unsigned long zonesize)
6376{
6377        unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6378        zone->pageblock_flags = NULL;
6379        if (usemapsize)
6380                zone->pageblock_flags =
6381                        memblock_alloc_node_nopanic(usemapsize,
6382                                                         pgdat->node_id);
6383}
6384#else
6385static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6386                                unsigned long zone_start_pfn, unsigned long zonesize) {}
6387#endif /* CONFIG_SPARSEMEM */
6388
6389#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6390
6391/* Initialise pageblock_order, which sets how many pages each NR_PAGEBLOCK_BITS group of bits covers */
6392void __init set_pageblock_order(void)
6393{
6394        unsigned int order;
6395
6396        /* Check that pageblock_nr_pages has not already been setup */
6397        if (pageblock_order)
6398                return;
6399
6400        if (HPAGE_SHIFT > PAGE_SHIFT)
6401                order = HUGETLB_PAGE_ORDER;
6402        else
6403                order = MAX_ORDER - 1;
6404
6405        /*
6406         * Assume the largest contiguous order of interest is a huge page.
6407         * This value may be variable depending on boot parameters on IA64 and
6408         * powerpc.
6409         */
6410        pageblock_order = order;
6411}
6412#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6413
6414/*
6415 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6416 * is unused as pageblock_order is set at compile-time. See
6417 * include/linux/pageblock-flags.h for the values of pageblock_order based on
6418 * the kernel config
6419 */
6420void __init set_pageblock_order(void)
6421{
6422}
6423
6424#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6425
6426static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6427                                                unsigned long present_pages)
6428{
6429        unsigned long pages = spanned_pages;
6430
6431        /*
6432         * Provide a more accurate estimation if there are holes within
6433         * the zone and SPARSEMEM is in use. If there are holes within the
6434         * zone, each populated memory region may cost us one or two extra
6435         * memmap pages due to alignment because memmap pages for each
6436         * populated region may not be naturally aligned on a page boundary.
6437         * So the (present_pages >> 4) heuristic is a tradeoff for that.
6438         */
6439        if (spanned_pages > present_pages + (present_pages >> 4) &&
6440            IS_ENABLED(CONFIG_SPARSEMEM))
6441                pages = present_pages;
6442
6443        return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6444}
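/*
 * Illustrative example, assuming a 64-byte struct page and 4KiB pages: a
 * zone spanning 1048576 pfns (4GiB) needs 1048576 * 64 bytes = 64MiB of
 * memmap, so calc_memmap_size() returns 16384 pages. If the zone were
 * sparsely populated (say only 262144 present pages), the SPARSEMEM branch
 * above would base the estimate on present_pages instead.
 */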
6445
6446#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6447static void pgdat_init_split_queue(struct pglist_data *pgdat)
6448{
6449        spin_lock_init(&pgdat->split_queue_lock);
6450        INIT_LIST_HEAD(&pgdat->split_queue);
6451        pgdat->split_queue_len = 0;
6452}
6453#else
6454static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6455#endif
6456
6457#ifdef CONFIG_COMPACTION
6458static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6459{
6460        init_waitqueue_head(&pgdat->kcompactd_wait);
6461}
6462#else
6463static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6464#endif
6465
6466static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6467{
6468        pgdat_resize_init(pgdat);
6469
6470        pgdat_init_split_queue(pgdat);
6471        pgdat_init_kcompactd(pgdat);
6472
6473        init_waitqueue_head(&pgdat->kswapd_wait);
6474        init_waitqueue_head(&pgdat->pfmemalloc_wait);
6475
6476        pgdat_page_ext_init(pgdat);
6477        spin_lock_init(&pgdat->lru_lock);
6478        lruvec_init(node_lruvec(pgdat));
6479}
6480
6481static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6482                                                        unsigned long remaining_pages)
6483{
6484        atomic_long_set(&zone->managed_pages, remaining_pages);
6485        zone_set_nid(zone, nid);
6486        zone->name = zone_names[idx];
6487        zone->zone_pgdat = NODE_DATA(nid);
6488        spin_lock_init(&zone->lock);
6489        zone_seqlock_init(zone);
6490        zone_pcp_init(zone);
6491}
6492
6493/*
6494 * Set up the zone data structures
6495 * - init pgdat internals
6496 * - init all zones belonging to this node
6497 *
6498 * NOTE: this function is only called during memory hotplug
6499 */
6500#ifdef CONFIG_MEMORY_HOTPLUG
6501void __ref free_area_init_core_hotplug(int nid)
6502{
6503        enum zone_type z;
6504        pg_data_t *pgdat = NODE_DATA(nid);
6505
6506        pgdat_init_internals(pgdat);
6507        for (z = 0; z < MAX_NR_ZONES; z++)
6508                zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6509}
6510#endif
6511
6512/*
6513 * Set up the zone data structures:
6514 *   - mark all pages reserved
6515 *   - mark all memory queues empty
6516 *   - clear the memory bitmaps
6517 *
6518 * NOTE: pgdat should get zeroed by caller.
6519 * NOTE: this function is only called during early init.
6520 */
6521static void __init free_area_init_core(struct pglist_data *pgdat)
6522{
6523        enum zone_type j;
6524        int nid = pgdat->node_id;
6525
6526        pgdat_init_internals(pgdat);
6527        pgdat->per_cpu_nodestats = &boot_nodestats;
6528
6529        for (j = 0; j < MAX_NR_ZONES; j++) {
6530                struct zone *zone = pgdat->node_zones + j;
6531                unsigned long size, freesize, memmap_pages;
6532                unsigned long zone_start_pfn = zone->zone_start_pfn;
6533
6534                size = zone->spanned_pages;
6535                freesize = zone->present_pages;
6536
6537                /*
6538                 * Adjust freesize so that it accounts for how much memory
6539                 * is used by this zone for memmap. This affects the watermark
6540                 * and per-cpu initialisations
6541                 */
6542                memmap_pages = calc_memmap_size(size, freesize);
6543                if (!is_highmem_idx(j)) {
6544                        if (freesize >= memmap_pages) {
6545                                freesize -= memmap_pages;
6546                                if (memmap_pages)
6547                                        printk(KERN_DEBUG
6548                                               "  %s zone: %lu pages used for memmap\n",
6549                                               zone_names[j], memmap_pages);
6550                        } else
6551                                pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
6552                                        zone_names[j], memmap_pages, freesize);
6553                }
6554
6555                /* Account for reserved pages */
6556                if (j == 0 && freesize > dma_reserve) {
6557                        freesize -= dma_reserve;
6558                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
6559                                        zone_names[0], dma_reserve);
6560                }
6561
6562                if (!is_highmem_idx(j))
6563                        nr_kernel_pages += freesize;
6564                /* Charge for highmem memmap if there are enough kernel pages */
6565                else if (nr_kernel_pages > memmap_pages * 2)
6566                        nr_kernel_pages -= memmap_pages;
6567                nr_all_pages += freesize;
6568
6569                /*
6570                 * Set an approximate value for lowmem here; it will be adjusted
6571                 * when the bootmem allocator frees pages into the buddy system.
6572                 * All highmem pages will be managed by the buddy system.
6573                 */
6574                zone_init_internals(zone, j, nid, freesize);
6575
6576                if (!size)
6577                        continue;
6578
6579                set_pageblock_order();
6580                setup_usemap(pgdat, zone, zone_start_pfn, size);
6581                init_currently_empty_zone(zone, zone_start_pfn, size);
6582                memmap_init(size, nid, j, zone_start_pfn);
6583        }
6584}
6585
6586#ifdef CONFIG_FLAT_NODE_MEM_MAP
6587static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
6588{
6589        unsigned long __maybe_unused start = 0;
6590        unsigned long __maybe_unused offset = 0;
6591
6592        /* Skip empty nodes */
6593        if (!pgdat->node_spanned_pages)
6594                return;
6595
6596        start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
6597        offset = pgdat->node_start_pfn - start;
6598        /* ia64 gets its own node_mem_map, before this, without bootmem */
6599        if (!pgdat->node_mem_map) {
6600                unsigned long size, end;
6601                struct page *map;
6602
6603                /*
6604                 * The zone's endpoints aren't required to be MAX_ORDER
6605                 * aligned, but the node_mem_map endpoints must be, in order
6606                 * for the buddy allocator to function correctly.
6607                 */
6608                end = pgdat_end_pfn(pgdat);
6609                end = ALIGN(end, MAX_ORDER_NR_PAGES);
6610                size =  (end - start) * sizeof(struct page);
6611                map = memblock_alloc_node_nopanic(size, pgdat->node_id);
6612                pgdat->node_mem_map = map + offset;
6613        }
6614        pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
6615                                __func__, pgdat->node_id, (unsigned long)pgdat,
6616                                (unsigned long)pgdat->node_mem_map);
6617#ifndef CONFIG_NEED_MULTIPLE_NODES
6618        /*
6619         * With no DISCONTIG, the global mem_map is just set as node 0's
6620         */
6621        if (pgdat == NODE_DATA(0)) {
6622                mem_map = NODE_DATA(0)->node_mem_map;
6623#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
6624                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
6625                        mem_map -= offset;
6626#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
6627        }
6628#endif
6629}
6630#else
6631static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
6632#endif /* CONFIG_FLAT_NODE_MEM_MAP */
6633
6634#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
6635static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
6636{
6637        pgdat->first_deferred_pfn = ULONG_MAX;
6638}
6639#else
6640static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
6641#endif
6642
6643void __init free_area_init_node(int nid, unsigned long *zones_size,
6644                                   unsigned long node_start_pfn,
6645                                   unsigned long *zholes_size)
6646{
6647        pg_data_t *pgdat = NODE_DATA(nid);
6648        unsigned long start_pfn = 0;
6649        unsigned long end_pfn = 0;
6650
6651        /* pg_data_t should be reset to zero when it's allocated */
6652        WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
6653
6654        pgdat->node_id = nid;
6655        pgdat->node_start_pfn = node_start_pfn;
6656        pgdat->per_cpu_nodestats = NULL;
6657#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6658        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
6659        pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
6660                (u64)start_pfn << PAGE_SHIFT,
6661                end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
6662#else
6663        start_pfn = node_start_pfn;
6664#endif
6665        calculate_node_totalpages(pgdat, start_pfn, end_pfn,
6666                                  zones_size, zholes_size);
6667
6668        alloc_node_mem_map(pgdat);
6669        pgdat_set_deferred_range(pgdat);
6670
6671        free_area_init_core(pgdat);
6672}
6673
6674#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
6675/*
6676 * Zero all valid struct pages in range [spfn, epfn), return number of struct
6677 * pages zeroed
6678 */
6679static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
6680{
6681        unsigned long pfn;
6682        u64 pgcnt = 0;
6683
6684        for (pfn = spfn; pfn < epfn; pfn++) {
6685                if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6686                        pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6687                                + pageblock_nr_pages - 1;
6688                        continue;
6689                }
6690                mm_zero_struct_page(pfn_to_page(pfn));
6691                pgcnt++;
6692        }
6693
6694        return pgcnt;
6695}
6696
6697/*
6698 * Only struct pages that are backed by physical memory are zeroed and
6699 * initialized by going through __init_single_page(). But, there are some
6700 * struct pages which are reserved in memblock allocator and their fields
6701 * may be accessed (for example page_to_pfn() on some configurations accesses
6702 * flags). We must explicitly zero those struct pages.
6703 *
6704 * This function also addresses a similar issue where struct pages are left
6705 * uninitialized because the physical address range is not covered by
6706 * memblock.memory or memblock.reserved. That could happen when memblock
6707 * layout is manually configured via memmap=.
6708 */
6709void __init zero_resv_unavail(void)
6710{
6711        phys_addr_t start, end;
6712        u64 i, pgcnt;
6713        phys_addr_t next = 0;
6714
6715        /*
6716         * Loop through unavailable ranges not covered by memblock.memory.
6717         */
6718        pgcnt = 0;
6719        for_each_mem_range(i, &memblock.memory, NULL,
6720                        NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
6721                if (next < start)
6722                        pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
6723                next = end;
6724        }
6725        pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
6726
6727        /*
6728         * Struct pages that do not have backing memory. This could be because
6729         * firmware is using some of this memory, or for some other reasons.
6730         */
6731        if (pgcnt)
6732                pr_info("Zeroed struct page in unavailable ranges: %lld pages\n", pgcnt);
6733}
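/*
 * Illustrative example (assuming 4KiB pages): if memblock.memory only
 * describes [4MiB, 2GiB), the loop above zeroes the struct pages for pfns
 * [0, 0x400) before the first region, and the final call covers
 * [0x80000, max_pfn) after it, so no reserved-but-unbacked page is left with
 * uninitialized fields.
 */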
6734#endif /* !CONFIG_FLAT_NODE_MEM_MAP */
6735
6736#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
6737
6738#if MAX_NUMNODES > 1
6739/*
6740 * Figure out the number of possible node ids.
6741 */
6742void __init setup_nr_node_ids(void)
6743{
6744        unsigned int highest;
6745
6746        highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
6747        nr_node_ids = highest + 1;
6748}
6749#endif
6750
6751/**
6752 * node_map_pfn_alignment - determine the maximum internode alignment
6753 *
6754 * This function should be called after node map is populated and sorted.
6755 * It calculates the maximum power of two alignment which can distinguish
6756 * all the nodes.
6757 *
6758 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
6759 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
6760 * nodes are shifted by 256MiB, 256MiB is returned.  Note that if only the
6761 * last node is shifted, 1GiB is enough and this function will indicate so.
6762 *
6763 * This is used to test whether pfn -> nid mapping of the chosen memory
6764 * model has fine enough granularity to avoid incorrect mapping for the
6765 * populated node map.
6766 *
6767 * Returns the determined alignment in pfns.  0 if there is no alignment
6768 * requirement (single node).
6769 */
6770unsigned long __init node_map_pfn_alignment(void)
6771{
6772        unsigned long accl_mask = 0, last_end = 0;
6773        unsigned long start, end, mask;
6774        int last_nid = -1;
6775        int i, nid;
6776
6777        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
6778                if (!start || last_nid < 0 || last_nid == nid) {
6779                        last_nid = nid;
6780                        last_end = end;
6781                        continue;
6782                }
6783
6784                /*
6785                 * Start with a mask granular enough to pin-point to the
6786                 * start pfn and tick off bits one-by-one until it becomes
6787                 * too coarse to separate the current node from the last.
6788                 */
6789                mask = ~((1 << __ffs(start)) - 1);
6790                while (mask && last_end <= (start & (mask << 1)))
6791                        mask <<= 1;
6792
6793                /* accumulate all internode masks */
6794                accl_mask |= mask;
6795        }
6796
6797        /* convert mask to number of pages */
6798        return ~accl_mask + 1;
6799}
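/*
 * Illustrative walk-through of the comment's 256MiB example, assuming 4KiB
 * pages: node 0 covers pfns [0x10000, 0x50000) and node 1 starts at 0x50000.
 * __ffs(0x50000) is 16, so the initial mask keeps 0x10000-pfn (256MiB)
 * granularity; widening it once would require last_end (0x50000) to be at or
 * below the start masked to 512MiB granularity (0x40000), which it is not,
 * so the loop stops and the function returns 0x10000 pfns, i.e. 256MiB.
 */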
6800
6801/* Find the lowest pfn for a node */
6802static unsigned long __init find_min_pfn_for_node(int nid)
6803{
6804        unsigned long min_pfn = ULONG_MAX;
6805        unsigned long start_pfn;
6806        int i;
6807
6808        for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
6809                min_pfn = min(min_pfn, start_pfn);
6810
6811        if (min_pfn == ULONG_MAX) {
6812                pr_warn("Could not find start_pfn for node %d\n", nid);
6813                return 0;
6814        }
6815
6816        return min_pfn;
6817}
6818
6819/**
6820 * find_min_pfn_with_active_regions - Find the minimum PFN registered
6821 *
6822 * It returns the minimum PFN based on information provided via
6823 * memblock_set_node().
6824 */
6825unsigned long __init find_min_pfn_with_active_regions(void)
6826{
6827        return find_min_pfn_for_node(MAX_NUMNODES);
6828}
6829
6830/*
6831 * early_calculate_totalpages()
6832 * Sum pages in active regions for movable zone.
6833 * Populate N_MEMORY for calculating usable_nodes.
6834 */
6835static unsigned long __init early_calculate_totalpages(void)
6836{
6837        unsigned long totalpages = 0;
6838        unsigned long start_pfn, end_pfn;
6839        int i, nid;
6840
6841        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6842                unsigned long pages = end_pfn - start_pfn;
6843
6844                totalpages += pages;
6845                if (pages)
6846                        node_set_state(nid, N_MEMORY);
6847        }
6848        return totalpages;
6849}
6850
6851/*
6852 * Find the PFN at which ZONE_MOVABLE begins in each node. Kernel memory
6853 * is spread evenly between nodes as long as the nodes have enough
6854 * memory. When they don't, some nodes will have more kernelcore than
6855 * others.
6856 */
6857static void __init find_zone_movable_pfns_for_nodes(void)
6858{
6859        int i, nid;
6860        unsigned long usable_startpfn;
6861        unsigned long kernelcore_node, kernelcore_remaining;
6862        /* save the state before borrowing the nodemask */
6863        nodemask_t saved_node_state = node_states[N_MEMORY];
6864        unsigned long totalpages = early_calculate_totalpages();
6865        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
6866        struct memblock_region *r;
6867
6868        /* Need to find movable_zone earlier when movable_node is specified. */
6869        find_usable_zone_for_movable();
6870
6871        /*
6872         * If movable_node is specified, ignore kernelcore and movablecore
6873         * options.
6874         */
6875        if (movable_node_is_enabled()) {
6876                for_each_memblock(memory, r) {
6877                        if (!memblock_is_hotpluggable(r))
6878                                continue;
6879
6880                        nid = r->nid;
6881
6882                        usable_startpfn = PFN_DOWN(r->base);
6883                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6884                                min(usable_startpfn, zone_movable_pfn[nid]) :
6885                                usable_startpfn;
6886                }
6887
6888                goto out2;
6889        }
6890
6891        /*
6892         * If kernelcore=mirror is specified, ignore movablecore option
6893         */
6894        if (mirrored_kernelcore) {
6895                bool mem_below_4gb_not_mirrored = false;
6896
6897                for_each_memblock(memory, r) {
6898                        if (memblock_is_mirror(r))
6899                                continue;
6900
6901                        nid = r->nid;
6902
6903                        usable_startpfn = memblock_region_memory_base_pfn(r);
6904
6905                        if (usable_startpfn < 0x100000) {
6906                                mem_below_4gb_not_mirrored = true;
6907                                continue;
6908                        }
6909
6910                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
6911                                min(usable_startpfn, zone_movable_pfn[nid]) :
6912                                usable_startpfn;
6913                }
6914
6915                if (mem_below_4gb_not_mirrored)
6916                        pr_warn("This configuration results in unmirrored kernel memory.\n");
6917
6918                goto out2;
6919        }
6920
6921        /*
6922         * If kernelcore=nn% or movablecore=nn% was specified, calculate the
6923         * amount of necessary memory.
6924         */
6925        if (required_kernelcore_percent)
6926                required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
6927                                       10000UL;
6928        if (required_movablecore_percent)
6929                required_movablecore = (totalpages * 100 * required_movablecore_percent) /
6930                                        10000UL;
6931
6932        /*
6933         * If movablecore= was specified, calculate the corresponding size of
6934         * kernelcore so that memory usable for
6935         * any allocation type is evenly spread. If both kernelcore
6936         * and movablecore are specified, then the value of kernelcore
6937         * will be used for required_kernelcore if it's greater than
6938         * what movablecore would have allowed.
6939         */
6940        if (required_movablecore) {
6941                unsigned long corepages;
6942
6943                /*
6944                 * Round-up so that ZONE_MOVABLE is at least as large as what
6945                 * was requested by the user
6946                 */
6947                required_movablecore =
6948                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
6949                required_movablecore = min(totalpages, required_movablecore);
6950                corepages = totalpages - required_movablecore;
6951
6952                required_kernelcore = max(required_kernelcore, corepages);
6953        }
6954
6955        /*
6956         * If kernelcore was not specified or kernelcore size is larger
6957         * than totalpages, there is no ZONE_MOVABLE.
6958         */
6959        if (!required_kernelcore || required_kernelcore >= totalpages)
6960                goto out;
6961
6962        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
6963        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
6964
6965restart:
6966        /* Spread kernelcore memory as evenly as possible throughout nodes */
6967        kernelcore_node = required_kernelcore / usable_nodes;
6968        for_each_node_state(nid, N_MEMORY) {
6969                unsigned long start_pfn, end_pfn;
6970
6971                /*
6972                 * Recalculate kernelcore_node if the division per node
6973                 * now exceeds what is necessary to satisfy the requested
6974                 * amount of memory for the kernel
6975                 */
6976                if (required_kernelcore < kernelcore_node)
6977                        kernelcore_node = required_kernelcore / usable_nodes;
6978
6979                /*
6980                 * As the map is walked, we track how much memory is usable
6981                 * by the kernel using kernelcore_remaining. When it is
6982                 * 0, the rest of the node is usable by ZONE_MOVABLE
6983                 */
6984                kernelcore_remaining = kernelcore_node;
6985
6986                /* Go through each range of PFNs within this node */
6987                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6988                        unsigned long size_pages;
6989
6990                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
6991                        if (start_pfn >= end_pfn)
6992                                continue;
6993
6994                        /* Account for what is only usable for kernelcore */
6995                        if (start_pfn < usable_startpfn) {
6996                                unsigned long kernel_pages;
6997                                kernel_pages = min(end_pfn, usable_startpfn)
6998                                                                - start_pfn;
6999
7000                                kernelcore_remaining -= min(kernel_pages,
7001                                                        kernelcore_remaining);
7002                                required_kernelcore -= min(kernel_pages,
7003                                                        required_kernelcore);
7004
7005                                /* Continue if range is now fully accounted */
7006                                if (end_pfn <= usable_startpfn) {
7007
7008                                        /*
7009                                         * Push zone_movable_pfn to the end so
7010                                         * that if we have to rebalance
7011                                         * kernelcore across nodes, we will
7012                                         * not double account here
7013                                         */
7014                                        zone_movable_pfn[nid] = end_pfn;
7015                                        continue;
7016                                }
7017                                start_pfn = usable_startpfn;
7018                        }
7019
7020                        /*
7021                         * The usable PFN range for ZONE_MOVABLE is from
7022                         * start_pfn->end_pfn. Calculate size_pages as the
7023                         * number of pages used as kernelcore
7024                         */
7025                        size_pages = end_pfn - start_pfn;
7026                        if (size_pages > kernelcore_remaining)
7027                                size_pages = kernelcore_remaining;
7028                        zone_movable_pfn[nid] = start_pfn + size_pages;
7029
7030                        /*
7031                         * Some kernelcore has been met, update counts and
7032                         * break if the kernelcore for this node has been
7033                         * satisfied
7034                         */
7035                        required_kernelcore -= min(required_kernelcore,
7036                                                                size_pages);
7037                        kernelcore_remaining -= size_pages;
7038                        if (!kernelcore_remaining)
7039                                break;
7040                }
7041        }
7042
7043        /*
7044         * If there is still required_kernelcore, we do another pass with one
7045         * less node in the count. This will push zone_movable_pfn[nid] further
7046         * along on the nodes that still have memory until kernelcore is
7047         * satisfied
7048         */
7049        usable_nodes--;
7050        if (usable_nodes && required_kernelcore > usable_nodes)
7051                goto restart;
7052
7053out2:
7054        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7055        for (nid = 0; nid < MAX_NUMNODES; nid++)
7056                zone_movable_pfn[nid] =
7057                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7058
7059out:
7060        /* restore the node_state */
7061        node_states[N_MEMORY] = saved_node_state;
7062}
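/*
 * Illustrative example: with kernelcore=4G and two nodes that each
 * contribute 4GiB of memory usable for ZONE_MOVABLE, the pass above aims for
 * kernelcore_node = 2GiB per node, so (ignoring the usable_startpfn lower
 * bound, holes, mirroring and the MAX_ORDER rounding under out2)
 * zone_movable_pfn[] lands roughly 2GiB into each node and the upper halves
 * become ZONE_MOVABLE.
 */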
7063
7064/* Any regular or high memory on that node? */
7065static void check_for_memory(pg_data_t *pgdat, int nid)
7066{
7067        enum zone_type zone_type;
7068
7069        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7070                struct zone *zone = &pgdat->node_zones[zone_type];
7071                if (populated_zone(zone)) {
7072                        if (IS_ENABLED(CONFIG_HIGHMEM))
7073                                node_set_state(nid, N_HIGH_MEMORY);
7074                        if (zone_type <= ZONE_NORMAL)
7075                                node_set_state(nid, N_NORMAL_MEMORY);
7076                        break;
7077                }
7078        }
7079}
7080
7081/**
7082 * free_area_init_nodes - Initialise all pg_data_t and zone data
7083 * @max_zone_pfn: an array of max PFNs for each zone
7084 *
7085 * This will call free_area_init_node() for each active node in the system.
7086 * Using the page ranges provided by memblock_set_node(), the size of each
7087 * zone in each node, and of its holes, is calculated. If the maximum PFNs
7088 * of two adjacent zones match, it is assumed that the higher zone is empty.
7089 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7090 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7091 * starts where the previous one ended. For example, ZONE_DMA32 starts
7092 * at arch_max_dma_pfn.
7093 */
7094void __init free_area_init_nodes(unsigned long *max_zone_pfn)
7095{
7096        unsigned long start_pfn, end_pfn;
7097        int i, nid;
7098
7099        /* Record where the zone boundaries are */
7100        memset(arch_zone_lowest_possible_pfn, 0,
7101                                sizeof(arch_zone_lowest_possible_pfn));
7102        memset(arch_zone_highest_possible_pfn, 0,
7103                                sizeof(arch_zone_highest_possible_pfn));
7104
7105        start_pfn = find_min_pfn_with_active_regions();
7106
7107        for (i = 0; i < MAX_NR_ZONES; i++) {
7108                if (i == ZONE_MOVABLE)
7109                        continue;
7110
7111                end_pfn = max(max_zone_pfn[i], start_pfn);
7112                arch_zone_lowest_possible_pfn[i] = start_pfn;
7113                arch_zone_highest_possible_pfn[i] = end_pfn;
7114
7115                start_pfn = end_pfn;
7116        }
7117
7118        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7119        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7120        find_zone_movable_pfns_for_nodes();
7121
7122        /* Print out the zone ranges */
7123        pr_info("Zone ranges:\n");
7124        for (i = 0; i < MAX_NR_ZONES; i++) {
7125                if (i == ZONE_MOVABLE)
7126                        continue;
7127                pr_info("  %-8s ", zone_names[i]);
7128                if (arch_zone_lowest_possible_pfn[i] ==
7129                                arch_zone_highest_possible_pfn[i])
7130                        pr_cont("empty\n");
7131                else
7132                        pr_cont("[mem %#018Lx-%#018Lx]\n",
7133                                (u64)arch_zone_lowest_possible_pfn[i]
7134                                        << PAGE_SHIFT,
7135                                ((u64)arch_zone_highest_possible_pfn[i]
7136                                        << PAGE_SHIFT) - 1);
7137        }
7138
7139        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
7140        pr_info("Movable zone start for each node\n");
7141        for (i = 0; i < MAX_NUMNODES; i++) {
7142                if (zone_movable_pfn[i])
7143                        pr_info("  Node %d: %#018Lx\n", i,
7144                               (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7145        }
7146
7147        /* Print out the early node map */
7148        pr_info("Early memory node ranges\n");
7149        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
7150                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7151                        (u64)start_pfn << PAGE_SHIFT,
7152                        ((u64)end_pfn << PAGE_SHIFT) - 1);
7153
7154        /* Initialise every node */
7155        mminit_verify_pageflags_layout();
7156        setup_nr_node_ids();
7157        zero_resv_unavail();
7158        for_each_online_node(nid) {
7159                pg_data_t *pgdat = NODE_DATA(nid);
7160                free_area_init_node(nid, NULL,
7161                                find_min_pfn_for_node(nid), NULL);
7162
7163                /* Any memory on that node */
7164                if (pgdat->node_present_pages)
7165                        node_set_state(nid, N_MEMORY);
7166                check_for_memory(pgdat, nid);
7167        }
7168}
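/*
 * Illustrative example (assumed values, not from this file): on a
 * hypothetical x86_64 machine with 8 GiB of RAM and 4 KiB pages, the
 * architecture code might pass a max_zone_pfn array roughly like the one
 * below; exact values depend on the platform and its memory map.
 *
 *	unsigned long max_zone_pfn[MAX_NR_ZONES] = {
 *		[ZONE_DMA]	= 0x1000,	first 16 MiB
 *		[ZONE_DMA32]	= 0x100000,	up to 4 GiB
 *		[ZONE_NORMAL]	= 0x200000,	up to 8 GiB
 *	};
 *	free_area_init_nodes(max_zone_pfn);
 *
 * The resulting zone ranges would be roughly ZONE_DMA [min_pfn, 0x1000),
 * ZONE_DMA32 [0x1000, 0x100000) and ZONE_NORMAL [0x100000, 0x200000),
 * each zone starting where the previous one ended.
 */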
7169
7170static int __init cmdline_parse_core(char *p, unsigned long *core,
7171                                     unsigned long *percent)
7172{
7173        unsigned long long coremem;
7174        char *endptr;
7175
7176        if (!p)
7177                return -EINVAL;
7178
7179        /* Value may be a percentage of total memory, otherwise bytes */
7180        coremem = simple_strtoull(p, &endptr, 0);
7181        if (*endptr == '%') {
7182                /* Paranoid check for percent values greater than 100 */
7183                WARN_ON(coremem > 100);
7184
7185                *percent = coremem;
7186        } else {
7187                coremem = memparse(p, &p);
7188                /* Paranoid check that UL is enough for the coremem value */
7189                WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7190
7191                *core = coremem >> PAGE_SHIFT;
7192                *percent = 0UL;
7193        }
7194        return 0;
7195}
7196
7197/*
7198 * kernelcore=size sets the amount of memory to be used for allocations that
7199 * cannot be reclaimed or migrated.
7200 */
7201static int __init cmdline_parse_kernelcore(char *p)
7202{
7203        /* parse kernelcore=mirror */
7204        if (parse_option_str(p, "mirror")) {
7205                mirrored_kernelcore = true;
7206                return 0;
7207        }
7208
7209        return cmdline_parse_core(p, &required_kernelcore,
7210                                  &required_kernelcore_percent);
7211}
7212
7213/*
7214 * movablecore=size sets the amount of memory to be used for allocations that
7215 * can be reclaimed or migrated.
7216 */
7217static int __init cmdline_parse_movablecore(char *p)
7218{
7219        return cmdline_parse_core(p, &required_movablecore,
7220                                  &required_movablecore_percent);
7221}
7222
7223early_param("kernelcore", cmdline_parse_kernelcore);
7224early_param("movablecore", cmdline_parse_movablecore);
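/*
 * Illustrative usage (hedged; see Documentation/admin-guide/kernel-parameters.txt
 * for the authoritative description).  These parsers accept boot command
 * line options such as:
 *
 *	kernelcore=512M		keep at least 512 MiB out of ZONE_MOVABLE
 *	kernelcore=30%		the same, expressed as a percentage of memory
 *	kernelcore=mirror	only mirrored memory is kept out of ZONE_MOVABLE
 *	movablecore=2G		place roughly 2 GiB into ZONE_MOVABLE
 */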
7225
7226#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
7227
7228void adjust_managed_page_count(struct page *page, long count)
7229{
7230        atomic_long_add(count, &page_zone(page)->managed_pages);
7231        totalram_pages_add(count);
7232#ifdef CONFIG_HIGHMEM
7233        if (PageHighMem(page))
7234                totalhigh_pages_add(count);
7235#endif
7236}
7237EXPORT_SYMBOL(adjust_managed_page_count);
7238
7239unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7240{
7241        void *pos;
7242        unsigned long pages = 0;
7243
7244        start = (void *)PAGE_ALIGN((unsigned long)start);
7245        end = (void *)((unsigned long)end & PAGE_MASK);
7246        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7247                struct page *page = virt_to_page(pos);
7248                void *direct_map_addr;
7249
7250                /*
7251                 * 'direct_map_addr' might be different from 'pos'
7252                  * because virt_to_page() on some architectures
7253                  * works with aliases.  Getting the direct map
7254                 * address ensures that we get a _writeable_
7255                 * alias for the memset().
7256                 */
7257                direct_map_addr = page_address(page);
7258                if ((unsigned int)poison <= 0xFF)
7259                        memset(direct_map_addr, poison, PAGE_SIZE);
7260
7261                free_reserved_page(page);
7262        }
7263
7264        if (pages && s)
7265                pr_info("Freeing %s memory: %ldK\n",
7266                        s, pages << (PAGE_SHIFT - 10));
7267
7268        return pages;
7269}
7270EXPORT_SYMBOL(free_reserved_area);
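/*
 * Illustrative caller (hedged sketch; the exact form lives in
 * include/linux/mm.h as free_initmem_default()).  Releasing the init
 * sections looks roughly like:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * Each page in the range is poisoned, handed back to the buddy allocator
 * and accounted via adjust_managed_page_count().
 */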
7271
7272#ifdef  CONFIG_HIGHMEM
7273void free_highmem_page(struct page *page)
7274{
7275        __free_reserved_page(page);
7276        totalram_pages_inc();
7277        atomic_long_inc(&page_zone(page)->managed_pages);
7278        totalhigh_pages_inc();
7279}
7280#endif
7281
7282
7283void __init mem_init_print_info(const char *str)
7284{
7285        unsigned long physpages, codesize, datasize, rosize, bss_size;
7286        unsigned long init_code_size, init_data_size;
7287
7288        physpages = get_num_physpages();
7289        codesize = _etext - _stext;
7290        datasize = _edata - _sdata;
7291        rosize = __end_rodata - __start_rodata;
7292        bss_size = __bss_stop - __bss_start;
7293        init_data_size = __init_end - __init_begin;
7294        init_code_size = _einittext - _sinittext;
7295
7296        /*
7297         * Detect special cases and adjust section sizes accordingly:
7298         * 1) .init.* may be embedded into .data sections
7299         * 2) .init.text.* may be out of [__init_begin, __init_end],
7300         *    please refer to arch/tile/kernel/vmlinux.lds.S.
7301         * 3) .rodata.* may be embedded into .text or .data sections.
7302         */
7303#define adj_init_size(start, end, size, pos, adj) \
7304        do { \
7305                if (start <= pos && pos < end && size > adj) \
7306                        size -= adj; \
7307        } while (0)
7308
7309        adj_init_size(__init_begin, __init_end, init_data_size,
7310                     _sinittext, init_code_size);
7311        adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7312        adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7313        adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7314        adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7315
7316#undef  adj_init_size
7317
7318        pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7319#ifdef  CONFIG_HIGHMEM
7320                ", %luK highmem"
7321#endif
7322                "%s%s)\n",
7323                nr_free_pages() << (PAGE_SHIFT - 10),
7324                physpages << (PAGE_SHIFT - 10),
7325                codesize >> 10, datasize >> 10, rosize >> 10,
7326                (init_data_size + init_code_size) >> 10, bss_size >> 10,
7327                (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7328                totalcma_pages << (PAGE_SHIFT - 10),
7329#ifdef  CONFIG_HIGHMEM
7330                totalhigh_pages() << (PAGE_SHIFT - 10),
7331#endif
7332                str ? ", " : "", str ? str : "");
7333}
7334
7335/**
7336 * set_dma_reserve - set the specified number of pages reserved in the first zone
7337 * @new_dma_reserve: The number of pages to mark reserved
7338 *
7339 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7340 * In the DMA zone, a significant percentage may be consumed by kernel image
7341 * and other unfreeable allocations which can skew the watermarks badly. This
7342 * function may optionally be used to account for unfreeable pages in the
7343 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7344 * smaller per-cpu batchsize.
7345 */
7346void __init set_dma_reserve(unsigned long new_dma_reserve)
7347{
7348        dma_reserve = new_dma_reserve;
7349}
7350
7351void __init free_area_init(unsigned long *zones_size)
7352{
7353        zero_resv_unavail();
7354        free_area_init_node(0, zones_size,
7355                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
7356}
7357
7358static int page_alloc_cpu_dead(unsigned int cpu)
7359{
7360
7361        lru_add_drain_cpu(cpu);
7362        drain_pages(cpu);
7363
7364        /*
7365         * Spill the event counters of the dead processor
7366         * into the current processor's event counters.
7367         * This artificially elevates the count of the current
7368         * processor.
7369         */
7370        vm_events_fold_cpu(cpu);
7371
7372        /*
7373         * Zero the differential counters of the dead processor
7374         * so that the vm statistics are consistent.
7375         *
7376         * This is only okay since the processor is dead and cannot
7377         * race with what we are doing.
7378         */
7379        cpu_vm_stats_fold(cpu);
7380        return 0;
7381}
7382
7383void __init page_alloc_init(void)
7384{
7385        int ret;
7386
7387        ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7388                                        "mm/page_alloc:dead", NULL,
7389                                        page_alloc_cpu_dead);
7390        WARN_ON(ret < 0);
7391}
7392
7393/*
7394 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7395 *      or min_free_kbytes changes.
7396 */
7397static void calculate_totalreserve_pages(void)
7398{
7399        struct pglist_data *pgdat;
7400        unsigned long reserve_pages = 0;
7401        enum zone_type i, j;
7402
7403        for_each_online_pgdat(pgdat) {
7404
7405                pgdat->totalreserve_pages = 0;
7406
7407                for (i = 0; i < MAX_NR_ZONES; i++) {
7408                        struct zone *zone = pgdat->node_zones + i;
7409                        long max = 0;
7410                        unsigned long managed_pages = zone_managed_pages(zone);
7411
7412                        /* Find valid and maximum lowmem_reserve in the zone */
7413                        for (j = i; j < MAX_NR_ZONES; j++) {
7414                                if (zone->lowmem_reserve[j] > max)
7415                                        max = zone->lowmem_reserve[j];
7416                        }
7417
7418                        /* we treat the high watermark as reserved pages. */
7419                        max += high_wmark_pages(zone);
7420
7421                        if (max > managed_pages)
7422                                max = managed_pages;
7423
7424                        pgdat->totalreserve_pages += max;
7425
7426                        reserve_pages += max;
7427                }
7428        }
7429        totalreserve_pages = reserve_pages;
7430}
7431
7432/*
7433 * setup_per_zone_lowmem_reserve - called whenever
7434 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
7435 *      has a correct lowmem_reserve[] value, so an adequate number of
7436 *      pages is left in the zone after a successful __alloc_pages().
7437 */
7438static void setup_per_zone_lowmem_reserve(void)
7439{
7440        struct pglist_data *pgdat;
7441        enum zone_type j, idx;
7442
7443        for_each_online_pgdat(pgdat) {
7444                for (j = 0; j < MAX_NR_ZONES; j++) {
7445                        struct zone *zone = pgdat->node_zones + j;
7446                        unsigned long managed_pages = zone_managed_pages(zone);
7447
7448                        zone->lowmem_reserve[j] = 0;
7449
7450                        idx = j;
7451                        while (idx) {
7452                                struct zone *lower_zone;
7453
7454                                idx--;
7455                                lower_zone = pgdat->node_zones + idx;
7456
7457                                if (sysctl_lowmem_reserve_ratio[idx] < 1) {
7458                                        sysctl_lowmem_reserve_ratio[idx] = 0;
7459                                        lower_zone->lowmem_reserve[j] = 0;
7460                                } else {
7461                                        lower_zone->lowmem_reserve[j] =
7462                                                managed_pages / sysctl_lowmem_reserve_ratio[idx];
7463                                }
7464                                managed_pages += zone_managed_pages(lower_zone);
7465                        }
7466                }
7467        }
7468
7469        /* update totalreserve_pages */
7470        calculate_totalreserve_pages();
7471}
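/*
 * Worked example (illustrative numbers only, simplest two-zone layout
 * assumed): with the default sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256
 * and a ZONE_NORMAL managing 1,000,000 pages, the loop above leaves about
 *
 *	ZONE_DMA->lowmem_reserve[ZONE_NORMAL] = 1000000 / 256 ~= 3906 pages
 *
 * reserved in ZONE_DMA, so allocations that could have been satisfied from
 * ZONE_NORMAL cannot exhaust the much smaller DMA zone.
 */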
7472
7473static void __setup_per_zone_wmarks(void)
7474{
7475        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7476        unsigned long lowmem_pages = 0;
7477        struct zone *zone;
7478        unsigned long flags;
7479
7480        /* Calculate total number of !ZONE_HIGHMEM pages */
7481        for_each_zone(zone) {
7482                if (!is_highmem(zone))
7483                        lowmem_pages += zone_managed_pages(zone);
7484        }
7485
7486        for_each_zone(zone) {
7487                u64 tmp;
7488
7489                spin_lock_irqsave(&zone->lock, flags);
7490                tmp = (u64)pages_min * zone_managed_pages(zone);
7491                do_div(tmp, lowmem_pages);
7492                if (is_highmem(zone)) {
7493                        /*
7494                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7495                         * need highmem pages, so cap pages_min to a small
7496                         * value here.
7497                         *
7498                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7499                         * deltas control async page reclaim, and so should
7500                         * not be capped for highmem.
7501                         */
7502                        unsigned long min_pages;
7503
7504                        min_pages = zone_managed_pages(zone) / 1024;
7505                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
7506                        zone->_watermark[WMARK_MIN] = min_pages;
7507                } else {
7508                        /*
7509                         * If it's a lowmem zone, reserve a number of pages
7510                         * proportionate to the zone's size.
7511                         */
7512                        zone->_watermark[WMARK_MIN] = tmp;
7513                }
7514
7515                /*
7516                 * Set the kswapd watermark distance according to the
7517                 * scale factor in proportion to available memory, but
7518                 * ensure a minimum size on small systems.
7519                 */
7520                tmp = max_t(u64, tmp >> 2,
7521                            mult_frac(zone_managed_pages(zone),
7522                                      watermark_scale_factor, 10000));
7523
7524                zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7525                zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7526                zone->watermark_boost = 0;
7527
7528                spin_unlock_irqrestore(&zone->lock, flags);
7529        }
7530
7531        /* update totalreserve_pages */
7532        calculate_totalreserve_pages();
7533}
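/*
 * Worked example (illustrative numbers, 4 KiB pages assumed): with
 * min_free_kbytes == 8192 the code above computes pages_min = 8192 >> 2 =
 * 2048 pages.  For a lowmem zone that manages all of lowmem this gives
 *
 *	WMARK_MIN  = 2048
 *	tmp        = max(2048 >> 2, managed * watermark_scale_factor / 10000)
 *	WMARK_LOW  = WMARK_MIN + tmp
 *	WMARK_HIGH = WMARK_MIN + 2 * tmp
 *
 * so with ~1,000,000 managed pages and the default watermark_scale_factor
 * of 10, tmp = 1000 and the low/high watermarks land at 3048/4048 pages.
 */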
7534
7535/**
7536 * setup_per_zone_wmarks - called when min_free_kbytes changes
7537 * or when memory is hot-{added|removed}
7538 *
7539 * Ensures that the watermark[min,low,high] values for each zone are set
7540 * correctly with respect to min_free_kbytes.
7541 */
7542void setup_per_zone_wmarks(void)
7543{
7544        static DEFINE_SPINLOCK(lock);
7545
7546        spin_lock(&lock);
7547        __setup_per_zone_wmarks();
7548        spin_unlock(&lock);
7549}
7550
7551/*
7552 * Initialise min_free_kbytes.
7553 *
7554 * For small machines we want it small (128k min).  For large machines
7555 * we want it large (64MB max).  But it is not linear, because network
7556 * bandwidth does not increase linearly with machine size.  We use
7557 *
7558 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7559 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
7560 *
7561 * which yields
7562 *
7563 * 16MB:        512k
7564 * 32MB:        724k
7565 * 64MB:        1024k
7566 * 128MB:       1448k
7567 * 256MB:       2048k
7568 * 512MB:       2896k
7569 * 1024MB:      4096k
7570 * 2048MB:      5792k
7571 * 4096MB:      8192k
7572 * 8192MB:      11584k
7573 * 16384MB:     16384k
7574 */
7575int __meminit init_per_zone_wmark_min(void)
7576{
7577        unsigned long lowmem_kbytes;
7578        int new_min_free_kbytes;
7579
7580        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7581        new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7582
7583        if (new_min_free_kbytes > user_min_free_kbytes) {
7584                min_free_kbytes = new_min_free_kbytes;
7585                if (min_free_kbytes < 128)
7586                        min_free_kbytes = 128;
7587                if (min_free_kbytes > 65536)
7588                        min_free_kbytes = 65536;
7589        } else {
7590                pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
7591                                new_min_free_kbytes, user_min_free_kbytes);
7592        }
7593        setup_per_zone_wmarks();
7594        refresh_zone_stat_thresholds();
7595        setup_per_zone_lowmem_reserve();
7596
7597#ifdef CONFIG_NUMA
7598        setup_min_unmapped_ratio();
7599        setup_min_slab_ratio();
7600#endif
7601
7602        return 0;
7603}
7604core_initcall(init_per_zone_wmark_min)
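/*
 * Worked example: on a machine with roughly 4 GiB of lowmem,
 * lowmem_kbytes is about 4194304, so
 *
 *	int_sqrt(4194304 * 16) = int_sqrt(67108864) = 8192
 *
 * and min_free_kbytes defaults to 8192k, matching the 4096MB row of the
 * table above (the result is then clamped to the [128, 65536] range).
 */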
7605
7606/*
7607 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax()
7608 *      so that we can update the per-zone watermarks whenever min_free_kbytes
7609 *      changes.
7610 */
7611int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
7612        void __user *buffer, size_t *length, loff_t *ppos)
7613{
7614        int rc;
7615
7616        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7617        if (rc)
7618                return rc;
7619
7620        if (write) {
7621                user_min_free_kbytes = min_free_kbytes;
7622                setup_per_zone_wmarks();
7623        }
7624        return 0;
7625}
7626
7627int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
7628        void __user *buffer, size_t *length, loff_t *ppos)
7629{
7630        int rc;
7631
7632        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7633        if (rc)
7634                return rc;
7635
7636        return 0;
7637}
7638
7639int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
7640        void __user *buffer, size_t *length, loff_t *ppos)
7641{
7642        int rc;
7643
7644        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7645        if (rc)
7646                return rc;
7647
7648        if (write)
7649                setup_per_zone_wmarks();
7650
7651        return 0;
7652}
7653
7654#ifdef CONFIG_NUMA
7655static void setup_min_unmapped_ratio(void)
7656{
7657        pg_data_t *pgdat;
7658        struct zone *zone;
7659
7660        for_each_online_pgdat(pgdat)
7661                pgdat->min_unmapped_pages = 0;
7662
7663        for_each_zone(zone)
7664                zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
7665                                                         sysctl_min_unmapped_ratio) / 100;
7666}
7667
7668
7669int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
7670        void __user *buffer, size_t *length, loff_t *ppos)
7671{
7672        int rc;
7673
7674        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7675        if (rc)
7676                return rc;
7677
7678        setup_min_unmapped_ratio();
7679
7680        return 0;
7681}
7682
7683static void setup_min_slab_ratio(void)
7684{
7685        pg_data_t *pgdat;
7686        struct zone *zone;
7687
7688        for_each_online_pgdat(pgdat)
7689                pgdat->min_slab_pages = 0;
7690
7691        for_each_zone(zone)
7692                zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
7693                                                     sysctl_min_slab_ratio) / 100;
7694}
7695
7696int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
7697        void __user *buffer, size_t *length, loff_t *ppos)
7698{
7699        int rc;
7700
7701        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
7702        if (rc)
7703                return rc;
7704
7705        setup_min_slab_ratio();
7706
7707        return 0;
7708}
7709#endif
7710
7711/*
7712 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
7713 *      proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
7714 *      whenever sysctl_lowmem_reserve_ratio changes.
7715 *
7716 * The reserve ratio has absolutely no relation to the minimum
7717 * watermarks. The lowmem reserve ratio is only meaningful in
7718 * relation to the boot-time zone sizes.
7719 */
7720int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
7721        void __user *buffer, size_t *length, loff_t *ppos)
7722{
7723        proc_dointvec_minmax(table, write, buffer, length, ppos);
7724        setup_per_zone_lowmem_reserve();
7725        return 0;
7726}
7727
7728/*
7729 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
7730 * cpu.  It is the fraction of the total pages in each zone that a hot per-cpu
7731 * pagelist can hold before it gets flushed back to the buddy allocator.
7732 */
7733int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
7734        void __user *buffer, size_t *length, loff_t *ppos)
7735{
7736        struct zone *zone;
7737        int old_percpu_pagelist_fraction;
7738        int ret;
7739
7740        mutex_lock(&pcp_batch_high_lock);
7741        old_percpu_pagelist_fraction = percpu_pagelist_fraction;
7742
7743        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
7744        if (!write || ret < 0)
7745                goto out;
7746
7747        /* Sanity checking to avoid pcp imbalance */
7748        if (percpu_pagelist_fraction &&
7749            percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
7750                percpu_pagelist_fraction = old_percpu_pagelist_fraction;
7751                ret = -EINVAL;
7752                goto out;
7753        }
7754
7755        /* No change? */
7756        if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
7757                goto out;
7758
7759        for_each_populated_zone(zone) {
7760                unsigned int cpu;
7761
7762                for_each_possible_cpu(cpu)
7763                        pageset_set_high_and_batch(zone,
7764                                        per_cpu_ptr(zone->pageset, cpu));
7765        }
7766out:
7767        mutex_unlock(&pcp_batch_high_lock);
7768        return ret;
7769}
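/*
 * Illustrative usage (hedged; the resulting behaviour is sketched from
 * pageset_set_high_and_batch(), defined earlier in this file).  Writing
 *
 *	sysctl -w vm.percpu_pagelist_fraction=8
 *
 * makes each per-cpu pagelist hold at most managed_pages/8 pages of its
 * zone before pages are returned to the buddy allocator, with the batch
 * size derived from that high value.  Writing 0 restores the default
 * heuristic based on zone_batchsize().
 */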
7770
7771#ifdef CONFIG_NUMA
7772int hashdist = HASHDIST_DEFAULT;
7773
7774static int __init set_hashdist(char *str)
7775{
7776        if (!str)
7777                return 0;
7778        hashdist = simple_strtoul(str, &str, 0);
7779        return 1;
7780}
7781__setup("hashdist=", set_hashdist);
7782#endif
7783
7784#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
7785/*
7786 * Returns the number of pages that the arch has reserved but
7787 * are not known to alloc_large_system_hash().
7788 */
7789static unsigned long __init arch_reserved_kernel_pages(void)
7790{
7791        return 0;
7792}
7793#endif
7794
7795/*
7796 * Adaptive scale is meant to reduce the sizes of hash tables on large-memory
7797 * machines. As memory size increases the scale is also increased, but at a
7798 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
7799 * quadruples the scale is increased by one, which means the size of the hash
7800 * table only doubles, instead of quadrupling as well.
7801 * Because 32-bit systems cannot have the large amounts of physical memory
7802 * where this scaling makes sense, it is disabled on such platforms.
7803 */
7804#if __BITS_PER_LONG > 32
7805#define ADAPT_SCALE_BASE        (64ul << 30)
7806#define ADAPT_SCALE_SHIFT       2
7807#define ADAPT_SCALE_NPAGES      (ADAPT_SCALE_BASE >> PAGE_SHIFT)
7808#endif
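/*
 * Worked example (hedged): with ADAPT_SCALE_BASE = 64 GiB, a 1 TiB machine
 * covers two quadruplings (64 GiB -> 256 GiB -> 1 TiB), so the loop in
 * alloc_large_system_hash() below bumps @scale by 2.  Each extra scale step
 * halves the number of hash entries, so when memory grows 16x the table
 * grows by only about 4x instead of 16x.
 */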
7809
7810/*
7811 * allocate a large system hash table from bootmem
7812 * - it is assumed that the hash table must contain an exact power-of-2
7813 *   quantity of entries
7814 * - limit is the number of hash buckets, not the total allocation size
7815 */
7816void *__init alloc_large_system_hash(const char *tablename,
7817                                     unsigned long bucketsize,
7818                                     unsigned long numentries,
7819                                     int scale,
7820                                     int flags,
7821                                     unsigned int *_hash_shift,
7822                                     unsigned int *_hash_mask,
7823                                     unsigned long low_limit,
7824                                     unsigned long high_limit)
7825{
7826        unsigned long long max = high_limit;
7827        unsigned long log2qty, size;
7828        void *table = NULL;
7829        gfp_t gfp_flags;
7830
7831        /* allow the kernel cmdline to have a say */
7832        if (!numentries) {
7833                /* round applicable memory size up to nearest megabyte */
7834                numentries = nr_kernel_pages;
7835                numentries -= arch_reserved_kernel_pages();
7836
7837                /* It isn't necessary when PAGE_SIZE >= 1MB */
7838                if (PAGE_SHIFT < 20)
7839                        numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
7840
7841#if __BITS_PER_LONG > 32
7842                if (!high_limit) {
7843                        unsigned long adapt;
7844
7845                        for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
7846                             adapt <<= ADAPT_SCALE_SHIFT)
7847                                scale++;
7848                }
7849#endif
7850
7851                /* limit to 1 bucket per 2^scale bytes of low memory */
7852                if (scale > PAGE_SHIFT)
7853                        numentries >>= (scale - PAGE_SHIFT);
7854                else
7855                        numentries <<= (PAGE_SHIFT - scale);
7856
7857                 /* Make sure we've got at least a 0-order allocation. */
7858                if (unlikely(flags & HASH_SMALL)) {
7859                        /* Makes no sense without HASH_EARLY */
7860                        WARN_ON(!(flags & HASH_EARLY));
7861                        if (!(numentries >> *_hash_shift)) {
7862                                numentries = 1UL << *_hash_shift;
7863                                BUG_ON(!numentries);
7864                        }
7865                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7866                        numentries = PAGE_SIZE / bucketsize;
7867        }
7868        numentries = roundup_pow_of_two(numentries);
7869
7870        /* limit allocation size to 1/16 total memory by default */
7871        if (max == 0) {
7872                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7873                do_div(max, bucketsize);
7874        }
7875        max = min(max, 0x80000000ULL);
7876
7877        if (numentries < low_limit)
7878                numentries = low_limit;
7879        if (numentries > max)
7880                numentries = max;
7881
7882        log2qty = ilog2(numentries);
7883
7884        gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
7885        do {
7886                size = bucketsize << log2qty;
7887                if (flags & HASH_EARLY) {
7888                        if (flags & HASH_ZERO)
7889                                table = memblock_alloc_nopanic(size,
7890                                                               SMP_CACHE_BYTES);
7891                        else
7892                                table = memblock_alloc_raw(size,
7893                                                           SMP_CACHE_BYTES);
7894                } else if (hashdist) {
7895                        table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
7896                } else {
7897                        /*
7898                         * If bucketsize is not a power of two, we may free
7899                         * some pages at the end of the hash table, which
7900                         * alloc_pages_exact() does automatically.
7901                         */
7902                        if (get_order(size) < MAX_ORDER) {
7903                                table = alloc_pages_exact(size, gfp_flags);
7904                                kmemleak_alloc(table, size, 1, gfp_flags);
7905                        }
7906                }
7907        } while (!table && size > PAGE_SIZE && --log2qty);
7908
7909        if (!table)
7910                panic("Failed to allocate %s hash table\n", tablename);
7911
7912        pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7913                tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7914
7915        if (_hash_shift)
7916                *_hash_shift = log2qty;
7917        if (_hash_mask)
7918                *_hash_mask = (1 << log2qty) - 1;
7919
7920        return table;
7921}
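/*
 * Illustrative caller (assumed, based on fs/dcache.c; treat the exact
 * argument list there as authoritative).  The dentry hash table is sized
 * during boot with something along these lines:
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13, HASH_ZERO,
 *					&d_hash_shift, NULL, 0, 0);
 *
 * i.e. one bucket per 2^13 bytes of low memory by default, zeroed at
 * allocation time, with the resulting shift reported back to the caller.
 */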
7922
7923/*
7924 * This function checks whether the pageblock includes unmovable pages or not.
7925 * If @count is not zero, it is okay for the range to include up to @count
7926 * unmovable pages.
7927 * The PageLRU check without isolation or the lru_lock could race, so a
7928 * MIGRATE_MOVABLE block might include unmovable pages. The __PageMovable
7929 * check without lock_page may also miss some movable non-LRU pages in a
7930 * race. So this function cannot be expected to be exact.
7931 */
7932bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7933                         int migratetype, int flags)
7934{
7935        unsigned long pfn, iter, found;
7936
7937        /*
7938         * TODO we could make this much more efficient by not checking every
7939         * page in the range if we know all of them are in MOVABLE_ZONE and
7940         * that the movable zone guarantees that pages are migratable but
7941         * the latter is not the case right now unfortunately. E.g. movablecore
7942         * can still lead to having bootmem allocations in zone_movable.
7943         */
7944
7945        /*
7946         * CMA allocations (alloc_contig_range) really need to mark CMA
7947         * pageblocks MIGRATE_ISOLATE even when they are not in fact movable,
7948         * so consider them movable here.
7949         */
7950        if (is_migrate_cma(migratetype) &&
7951                        is_migrate_cma(get_pageblock_migratetype(page)))
7952                return false;
7953
7954        pfn = page_to_pfn(page);
7955        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7956                unsigned long check = pfn + iter;
7957
7958                if (!pfn_valid_within(check))
7959                        continue;
7960
7961                page = pfn_to_page(check);
7962
7963                if (PageReserved(page))
7964                        goto unmovable;
7965
7966                /*
7967                 * If the zone is movable and we have ruled out all reserved
7968                 * pages then it should be reasonably safe to assume the rest
7969                 * is movable.
7970                 */
7971                if (zone_idx(zone) == ZONE_MOVABLE)
7972                        continue;
7973
7974                /*
7975                 * Hugepages are not in LRU lists, but they're movable.
7976                  * We need not scan over tail pages because we don't
7977                 * handle each tail page individually in migration.
7978                 */
7979                if (PageHuge(page)) {
7980                        struct page *head = compound_head(page);
7981                        unsigned int skip_pages;
7982
7983                        if (!hugepage_migration_supported(page_hstate(head)))
7984                                goto unmovable;
7985
7986                        skip_pages = (1 << compound_order(head)) - (page - head);
7987                        iter += skip_pages - 1;
7988                        continue;
7989                }
7990
7991                /*
7992                  * We can't use page_count without pinning the page
7993                  * because another CPU can free the compound page.
7994                  * This check already skips compound tails of THP
7995                  * because their page->_refcount is zero at all times.
7996                 */
7997                if (!page_ref_count(page)) {
7998                        if (PageBuddy(page))
7999                                iter += (1 << page_order(page)) - 1;
8000                        continue;
8001                }
8002
8003                /*
8004                 * The HWPoisoned page may not be in the buddy system, and
8005                 * its page_count() is not 0.
8006                 */
8007                if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
8008                        continue;
8009
8010                if (__PageMovable(page))
8011                        continue;
8012
8013                if (!PageLRU(page))
8014                        found++;
8015                /*
8016                 * If there are RECLAIMABLE pages, we need to check
8017                 * them.  But for now, memory offline itself doesn't call
8018                 * shrink_node_slabs(), and this still needs to be fixed.
8019                 */
8020                 /*
8021                 * If the page is not RAM, page_count() should be 0.
8022                 * We don't need any further checks. This is a _used_ non-movable page.
8023                 *
8024                 * The problematic thing here is PG_reserved pages. PG_reserved
8025                 * is set on both memory hole pages and _used_ kernel
8026                 * pages at boot.
8027                 */
8028                if (found > count)
8029                        goto unmovable;
8030        }
8031        return false;
8032unmovable:
8033        WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
8034        if (flags & REPORT_FAILURE)
8035                dump_page(pfn_to_page(pfn+iter), "unmovable page");
8036        return true;
8037}
8038
8039#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
8040
8041static unsigned long pfn_max_align_down(unsigned long pfn)
8042{
8043        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8044                             pageblock_nr_pages) - 1);
8045}
8046
8047static unsigned long pfn_max_align_up(unsigned long pfn)
8048{
8049        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8050                                pageblock_nr_pages));
8051}
8052
8053/* [start, end) must belong to a single zone. */
8054static int __alloc_contig_migrate_range(struct compact_control *cc,
8055                                        unsigned long start, unsigned long end)
8056{
8057        /* This function is based on compact_zone() from compaction.c. */
8058        unsigned long nr_reclaimed;
8059        unsigned long pfn = start;
8060        unsigned int tries = 0;
8061        int ret = 0;
8062
8063        migrate_prep();
8064
8065        while (pfn < end || !list_empty(&cc->migratepages)) {
8066                if (fatal_signal_pending(current)) {
8067                        ret = -EINTR;
8068                        break;
8069                }
8070
8071                if (list_empty(&cc->migratepages)) {
8072                        cc->nr_migratepages = 0;
8073                        pfn = isolate_migratepages_range(cc, pfn, end);
8074                        if (!pfn) {
8075                                ret = -EINTR;
8076                                break;
8077                        }
8078                        tries = 0;
8079                } else if (++tries == 5) {
8080                        ret = ret < 0 ? ret : -EBUSY;
8081                        break;
8082                }
8083
8084                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8085                                                        &cc->migratepages);
8086                cc->nr_migratepages -= nr_reclaimed;
8087
8088                ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
8089                                    NULL, 0, cc->mode, MR_CONTIG_RANGE);
8090        }
8091        if (ret < 0) {
8092                putback_movable_pages(&cc->migratepages);
8093                return ret;
8094        }
8095        return 0;
8096}
8097
8098/**
8099 * alloc_contig_range() -- tries to allocate given range of pages
8100 * @start:      start PFN to allocate
8101 * @end:        one-past-the-last PFN to allocate
8102 * @migratetype:        migratetype of the underlying pageblocks (either
8103 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8104 *                      in range must have the same migratetype and it must
8105 *                      be either of the two.
8106 * @gfp_mask:   GFP mask to use during compaction
8107 *
8108 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8109 * aligned.  The PFN range must belong to a single zone.
8110 *
8111 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8112 * pageblocks in the range.  Once isolated, the pageblocks should not
8113 * be modified by others.
8114 *
8115 * Returns zero on success or negative error code.  On success all
8116 * pages whose PFN is in [start, end) are allocated for the caller and
8117 * need to be freed with free_contig_range().
8118 */
8119int alloc_contig_range(unsigned long start, unsigned long end,
8120                       unsigned migratetype, gfp_t gfp_mask)
8121{
8122        unsigned long outer_start, outer_end;
8123        unsigned int order;
8124        int ret = 0;
8125
8126        struct compact_control cc = {
8127                .nr_migratepages = 0,
8128                .order = -1,
8129                .zone = page_zone(pfn_to_page(start)),
8130                .mode = MIGRATE_SYNC,
8131                .ignore_skip_hint = true,
8132                .no_set_skip_hint = true,
8133                .gfp_mask = current_gfp_context(gfp_mask),
8134        };
8135        INIT_LIST_HEAD(&cc.migratepages);
8136
8137        /*
8138         * What we do here is we mark all pageblocks in range as
8139         * MIGRATE_ISOLATE.  Because pageblocks and max order pages may
8140         * have different sizes, and due to the way the page allocator
8141         * works, we align the range to the bigger of the two so
8142         * that the page allocator won't try to merge buddies from
8143         * different pageblocks and change MIGRATE_ISOLATE to some
8144         * other migration type.
8145         *
8146         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8147         * migrate the pages from the unaligned range (i.e. the pages
8148         * we are interested in).  This puts all the pages in the
8149         * range back into the page allocator as MIGRATE_ISOLATE.
8150         *
8151         * When this is done, we take the pages in the range from the
8152         * page allocator, removing them from the buddy system.  This
8153         * way the page allocator will never consider using them.
8154         *
8155         * This lets us mark the pageblocks back as
8156         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8157         * aligned range but not in the unaligned, original range are
8158         * put back to the page allocator so that the buddy system can use them.
8159         */
8160
8161        ret = start_isolate_page_range(pfn_max_align_down(start),
8162                                       pfn_max_align_up(end), migratetype, 0);
8163        if (ret)
8164                return ret;
8165
8166        /*
8167         * In case of -EBUSY, we'd like to know which page causes the problem.
8168         * So, just fall through. test_pages_isolated() has a tracepoint
8169         * which will report the busy page.
8170         *
8171         * It is possible that busy pages could become available before
8172         * the call to test_pages_isolated, and the range will actually be
8173         * allocated.  So, if we fall through be sure to clear ret so that
8174         * -EBUSY is not accidentally used or returned to caller.
8175         */
8176        ret = __alloc_contig_migrate_range(&cc, start, end);
8177        if (ret && ret != -EBUSY)
8178                goto done;
8179        ret = 0;
8180
8181        /*
8182         * Pages from [start, end) are within MAX_ORDER_NR_PAGES
8183         * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
8184         * more, all pages in [start, end) are free in the page allocator.
8185         * What we are going to do is allocate all pages from
8186         * [start, end) (that is, remove them from the page allocator).
8187         *
8188         * The only problem is that pages at the beginning and at the
8189         * end of the interesting range may not be aligned with pages
8190         * that the page allocator holds, i.e. they can be part of higher
8191         * order pages.  Because of this, we reserve the bigger range and,
8192         * once this is done, free the pages we are not interested in.
8193         *
8194         * We don't have to hold zone->lock here because the pages are
8195         * isolated thus they won't get removed from buddy.
8196         */
8197
8198        lru_add_drain_all();
8199        drain_all_pages(cc.zone);
8200
8201        order = 0;
8202        outer_start = start;
8203        while (!PageBuddy(pfn_to_page(outer_start))) {
8204                if (++order >= MAX_ORDER) {
8205                        outer_start = start;
8206                        break;
8207                }
8208                outer_start &= ~0UL << order;
8209        }
8210
8211        if (outer_start != start) {
8212                order = page_order(pfn_to_page(outer_start));
8213
8214                /*
8215                 * The outer_start page could be a small order buddy page that
8216                 * doesn't include the start page. Adjust outer_start
8217                 * in this case to report the failed page properly
8218                 * on the tracepoint in test_pages_isolated()
8219                 */
8220                if (outer_start + (1UL << order) <= start)
8221                        outer_start = start;
8222        }
8223
8224        /* Make sure the range is really isolated. */
8225        if (test_pages_isolated(outer_start, end, false)) {
8226                pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
8227                        __func__, outer_start, end);
8228                ret = -EBUSY;
8229                goto done;
8230        }
8231
8232        /* Grab isolated pages from freelists. */
8233        outer_end = isolate_freepages_range(&cc, outer_start, end);
8234        if (!outer_end) {
8235                ret = -EBUSY;
8236                goto done;
8237        }
8238
8239        /* Free head and tail (if any) */
8240        if (start != outer_start)
8241                free_contig_range(outer_start, start - outer_start);
8242        if (end != outer_end)
8243                free_contig_range(end, outer_end - end);
8244
8245done:
8246        undo_isolate_page_range(pfn_max_align_down(start),
8247                                pfn_max_align_up(end), migratetype);
8248        return ret;
8249}
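/*
 * Illustrative usage (hedged sketch, not from this file): the main user of
 * alloc_contig_range() is the CMA allocator (mm/cma.c), which grabs a
 * physically contiguous area roughly like this:
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA,
 *				 GFP_KERNEL);
 *	...
 *	free_contig_range(pfn, nr_pages);	releases it again
 *
 * Note that both ends are rounded out to pfn_max_align_down()/up()
 * internally, so neighbouring pages may be temporarily isolated while the
 * request is serviced.
 */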
8250
8251void free_contig_range(unsigned long pfn, unsigned nr_pages)
8252{
8253        unsigned int count = 0;
8254
8255        for (; nr_pages--; pfn++) {
8256                struct page *page = pfn_to_page(pfn);
8257
8258                count += page_count(page) != 1;
8259                __free_page(page);
8260        }
8261        WARN(count != 0, "%d pages are still in use!\n", count);
8262}
8263#endif
8264
8265#ifdef CONFIG_MEMORY_HOTPLUG
8266/*
8267 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8268 * page high values need to be recalculated.
8269 */
8270void __meminit zone_pcp_update(struct zone *zone)
8271{
8272        unsigned cpu;
8273        mutex_lock(&pcp_batch_high_lock);
8274        for_each_possible_cpu(cpu)
8275                pageset_set_high_and_batch(zone,
8276                                per_cpu_ptr(zone->pageset, cpu));
8277        mutex_unlock(&pcp_batch_high_lock);
8278}
8279#endif
8280
8281void zone_pcp_reset(struct zone *zone)
8282{
8283        unsigned long flags;
8284        int cpu;
8285        struct per_cpu_pageset *pset;
8286
8287        /* avoid races with drain_pages()  */
8288        local_irq_save(flags);
8289        if (zone->pageset != &boot_pageset) {
8290                for_each_online_cpu(cpu) {
8291                        pset = per_cpu_ptr(zone->pageset, cpu);
8292                        drain_zonestat(zone, pset);
8293                }
8294                free_percpu(zone->pageset);
8295                zone->pageset = &boot_pageset;
8296        }
8297        local_irq_restore(flags);
8298}
8299
8300#ifdef CONFIG_MEMORY_HOTREMOVE
8301/*
8302 * All pages in the range must be in a single zone and isolated
8303 * before calling this.
8304 */
8305void
8306__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8307{
8308        struct page *page;
8309        struct zone *zone;
8310        unsigned int order, i;
8311        unsigned long pfn;
8312        unsigned long flags;
8313        /* find the first valid pfn */
8314        for (pfn = start_pfn; pfn < end_pfn; pfn++)
8315                if (pfn_valid(pfn))
8316                        break;
8317        if (pfn == end_pfn)
8318                return;
8319        offline_mem_sections(pfn, end_pfn);
8320        zone = page_zone(pfn_to_page(pfn));
8321        spin_lock_irqsave(&zone->lock, flags);
8322        pfn = start_pfn;
8323        while (pfn < end_pfn) {
8324                if (!pfn_valid(pfn)) {
8325                        pfn++;
8326                        continue;
8327                }
8328                page = pfn_to_page(pfn);
8329                /*
8330                 * The HWPoisoned page may not be in the buddy system, and
8331                 * its page_count() is not 0.
8332                 */
8333                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8334                        pfn++;
8335                        SetPageReserved(page);
8336                        continue;
8337                }
8338
8339                BUG_ON(page_count(page));
8340                BUG_ON(!PageBuddy(page));
8341                order = page_order(page);
8342#ifdef CONFIG_DEBUG_VM
8343                pr_info("remove from free list %lx %d %lx\n",
8344                        pfn, 1 << order, end_pfn);
8345#endif
8346                list_del(&page->lru);
8347                rmv_page_order(page);
8348                zone->free_area[order].nr_free--;
8349                for (i = 0; i < (1 << order); i++)
8350                        SetPageReserved((page+i));
8351                pfn += (1 << order);
8352        }
8353        spin_unlock_irqrestore(&zone->lock, flags);
8354}
8355#endif
8356
8357bool is_free_buddy_page(struct page *page)
8358{
8359        struct zone *zone = page_zone(page);
8360        unsigned long pfn = page_to_pfn(page);
8361        unsigned long flags;
8362        unsigned int order;
8363
8364        spin_lock_irqsave(&zone->lock, flags);
8365        for (order = 0; order < MAX_ORDER; order++) {
8366                struct page *page_head = page - (pfn & ((1 << order) - 1));
8367
8368                if (PageBuddy(page_head) && page_order(page_head) >= order)
8369                        break;
8370        }
8371        spin_unlock_irqrestore(&zone->lock, flags);
8372
8373        return order < MAX_ORDER;
8374}
8375
8376#ifdef CONFIG_MEMORY_FAILURE
8377/*
8378 * Set PG_hwpoison flag if a given page is confirmed to be a free page.  This
8379 * test is performed under the zone lock to prevent a race against page
8380 * allocation.
8381 */
8382bool set_hwpoison_free_buddy_page(struct page *page)
8383{
8384        struct zone *zone = page_zone(page);
8385        unsigned long pfn = page_to_pfn(page);
8386        unsigned long flags;
8387        unsigned int order;
8388        bool hwpoisoned = false;
8389
8390        spin_lock_irqsave(&zone->lock, flags);
8391        for (order = 0; order < MAX_ORDER; order++) {
8392                struct page *page_head = page - (pfn & ((1 << order) - 1));
8393
8394                if (PageBuddy(page_head) && page_order(page_head) >= order) {
8395                        if (!TestSetPageHWPoison(page))
8396                                hwpoisoned = true;
8397                        break;
8398                }
8399        }
8400        spin_unlock_irqrestore(&zone->lock, flags);
8401
8402        return hwpoisoned;
8403}
8404#endif
8405