linux/mm/page_alloc.c
   1/*
   2 *  linux/mm/page_alloc.c
   3 *
   4 *  Manages the free list; the system allocates free pages here.
   5 *  Note that kmalloc() lives in slab.c
   6 *
   7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   8 *  Swap reorganised 29.12.95, Stephen Tweedie
   9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15 */
  16
  17#include <linux/stddef.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/interrupt.h>
  21#include <linux/pagemap.h>
  22#include <linux/jiffies.h>
  23#include <linux/bootmem.h>
  24#include <linux/compiler.h>
  25#include <linux/kernel.h>
  26#include <linux/kmemcheck.h>
  27#include <linux/module.h>
  28#include <linux/suspend.h>
  29#include <linux/pagevec.h>
  30#include <linux/blkdev.h>
  31#include <linux/slab.h>
  32#include <linux/oom.h>
  33#include <linux/notifier.h>
  34#include <linux/topology.h>
  35#include <linux/sysctl.h>
  36#include <linux/cpu.h>
  37#include <linux/cpuset.h>
  38#include <linux/memory_hotplug.h>
  39#include <linux/nodemask.h>
  40#include <linux/vmalloc.h>
  41#include <linux/mempolicy.h>
  42#include <linux/stop_machine.h>
  43#include <linux/sort.h>
  44#include <linux/pfn.h>
  45#include <linux/backing-dev.h>
  46#include <linux/fault-inject.h>
  47#include <linux/page-isolation.h>
  48#include <linux/page_cgroup.h>
  49#include <linux/debugobjects.h>
  50#include <linux/kmemleak.h>
  51#include <trace/events/kmem.h>
  52
  53#include <asm/tlbflush.h>
  54#include <asm/div64.h>
  55#include "internal.h"
  56
  57/*
  58 * Array of node states.
  59 */
  60nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  61        [N_POSSIBLE] = NODE_MASK_ALL,
  62        [N_ONLINE] = { { [0] = 1UL } },
  63#ifndef CONFIG_NUMA
  64        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  65#ifdef CONFIG_HIGHMEM
  66        [N_HIGH_MEMORY] = { { [0] = 1UL } },
  67#endif
  68        [N_CPU] = { { [0] = 1UL } },
  69#endif  /* NUMA */
  70};
  71EXPORT_SYMBOL(node_states);
  72
  73unsigned long totalram_pages __read_mostly;
  74unsigned long totalreserve_pages __read_mostly;
  75int percpu_pagelist_fraction;
  76gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  77
  78#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  79int pageblock_order __read_mostly;
  80#endif
  81
  82static void __free_pages_ok(struct page *page, unsigned int order);
  83
  84/*
  85 * results with 256, 32 in the lowmem_reserve sysctl:
  86 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  87 *      1G machine -> (16M dma, 784M normal, 224M high)
  88 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  89 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
   90 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  91 *
  92 * TBD: should special case ZONE_DMA32 machines here - in those we normally
  93 * don't need any ZONE_NORMAL reservation
  94 */
  95int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  96#ifdef CONFIG_ZONE_DMA
  97         256,
  98#endif
  99#ifdef CONFIG_ZONE_DMA32
 100         256,
 101#endif
 102#ifdef CONFIG_HIGHMEM
 103         32,
 104#endif
 105         32,
 106};
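
/*
 * Rough arithmetic for the example above: with a ratio of 256, a ZONE_NORMAL
 * allocation on the 1G machine keeps 784M/256, i.e. about 3M (~784 4K pages),
 * of ZONE_DMA out of reach, and a HIGHMEM allocation keeps roughly
 * (224M+784M)/256 = ~4M of ZONE_DMA reserved; each lowmem_reserve[] entry is
 * approximately the memory in the higher zones divided by the ratio of the
 * zone being protected.
 */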
 107
 108EXPORT_SYMBOL(totalram_pages);
 109
 110static char * const zone_names[MAX_NR_ZONES] = {
 111#ifdef CONFIG_ZONE_DMA
 112         "DMA",
 113#endif
 114#ifdef CONFIG_ZONE_DMA32
 115         "DMA32",
 116#endif
 117         "Normal",
 118#ifdef CONFIG_HIGHMEM
 119         "HighMem",
 120#endif
 121         "Movable",
 122};
 123
 124int min_free_kbytes = 1024;
 125
 126static unsigned long __meminitdata nr_kernel_pages;
 127static unsigned long __meminitdata nr_all_pages;
 128static unsigned long __meminitdata dma_reserve;
 129
 130#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 131  /*
 132   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
 133   * ranges of memory (RAM) that may be registered with add_active_range().
 134   * Ranges passed to add_active_range() will be merged if possible
 135   * so the number of times add_active_range() can be called is
 136   * related to the number of nodes and the number of holes
 137   */
 138  #ifdef CONFIG_MAX_ACTIVE_REGIONS
 139    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
 140    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
 141  #else
 142    #if MAX_NUMNODES >= 32
 143      /* If there can be many nodes, allow up to 50 holes per node */
 144      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
 145    #else
 146      /* By default, allow up to 256 distinct regions */
 147      #define MAX_ACTIVE_REGIONS 256
 148    #endif
 149  #endif
 150
 151  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
 152  static int __meminitdata nr_nodemap_entries;
 153  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 154  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 155  static unsigned long __initdata required_kernelcore;
 156  static unsigned long __initdata required_movablecore;
 157  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 158
 159  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 160  int movable_zone;
 161  EXPORT_SYMBOL(movable_zone);
 162#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 163
 164#if MAX_NUMNODES > 1
 165int nr_node_ids __read_mostly = MAX_NUMNODES;
 166int nr_online_nodes __read_mostly = 1;
 167EXPORT_SYMBOL(nr_node_ids);
 168EXPORT_SYMBOL(nr_online_nodes);
 169#endif
 170
 171int page_group_by_mobility_disabled __read_mostly;
 172
 173static void set_pageblock_migratetype(struct page *page, int migratetype)
 174{
 175
 176        if (unlikely(page_group_by_mobility_disabled))
 177                migratetype = MIGRATE_UNMOVABLE;
 178
 179        set_pageblock_flags_group(page, (unsigned long)migratetype,
 180                                        PB_migrate, PB_migrate_end);
 181}
 182
 183bool oom_killer_disabled __read_mostly;
 184
 185#ifdef CONFIG_DEBUG_VM
 186static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 187{
 188        int ret = 0;
 189        unsigned seq;
 190        unsigned long pfn = page_to_pfn(page);
 191
 192        do {
 193                seq = zone_span_seqbegin(zone);
 194                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
 195                        ret = 1;
 196                else if (pfn < zone->zone_start_pfn)
 197                        ret = 1;
 198        } while (zone_span_seqretry(zone, seq));
 199
 200        return ret;
 201}
 202
 203static int page_is_consistent(struct zone *zone, struct page *page)
 204{
 205        if (!pfn_valid_within(page_to_pfn(page)))
 206                return 0;
 207        if (zone != page_zone(page))
 208                return 0;
 209
 210        return 1;
 211}
 212/*
 213 * Temporary debugging check for pages not lying within a given zone.
 214 */
 215static int bad_range(struct zone *zone, struct page *page)
 216{
 217        if (page_outside_zone_boundaries(zone, page))
 218                return 1;
 219        if (!page_is_consistent(zone, page))
 220                return 1;
 221
 222        return 0;
 223}
 224#else
 225static inline int bad_range(struct zone *zone, struct page *page)
 226{
 227        return 0;
 228}
 229#endif
 230
 231static void bad_page(struct page *page)
 232{
 233        static unsigned long resume;
 234        static unsigned long nr_shown;
 235        static unsigned long nr_unshown;
 236
 237        /* Don't complain about poisoned pages */
 238        if (PageHWPoison(page)) {
 239                __ClearPageBuddy(page);
 240                return;
 241        }
 242
 243        /*
 244         * Allow a burst of 60 reports, then keep quiet for that minute;
 245         * or allow a steady drip of one report per second.
 246         */
 247        if (nr_shown == 60) {
 248                if (time_before(jiffies, resume)) {
 249                        nr_unshown++;
 250                        goto out;
 251                }
 252                if (nr_unshown) {
 253                        printk(KERN_ALERT
 254                              "BUG: Bad page state: %lu messages suppressed\n",
 255                                nr_unshown);
 256                        nr_unshown = 0;
 257                }
 258                nr_shown = 0;
 259        }
 260        if (nr_shown++ == 0)
 261                resume = jiffies + 60 * HZ;
 262
 263        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
 264                current->comm, page_to_pfn(page));
 265        printk(KERN_ALERT
 266                "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
 267                page, (void *)page->flags, page_count(page),
 268                page_mapcount(page), page->mapping, page->index);
 269
 270        dump_stack();
 271out:
 272        /* Leave bad fields for debug, except PageBuddy could make trouble */
 273        __ClearPageBuddy(page);
 274        add_taint(TAINT_BAD_PAGE);
 275}
 276
 277/*
 278 * Higher-order pages are called "compound pages".  They are structured thusly:
 279 *
 280 * The first PAGE_SIZE page is called the "head page".
 281 *
 282 * The remaining PAGE_SIZE pages are called "tail pages".
 283 *
 284 * All pages have PG_compound set.  All pages have their ->private pointing at
 285 * the head page (even the head page has this).
 286 *
 287 * The first tail page's ->lru.next holds the address of the compound page's
 288 * put_page() function.  Its ->lru.prev holds the order of allocation.
 289 * This usage means that zero-order pages may not be compound.
 290 */
 291
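/*
 * Usage sketch (illustrative only, not called from this file): callers that
 * want the head/tail layout described above ask for __GFP_COMP, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *	if (page)
 *		__free_pages(page, 2);
 *
 * The order-2 page returned has PageHead() true on the head page, and its
 * three tail pages point back at it via ->first_page, as set up by
 * prep_compound_page() below.
 */
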
 292static void free_compound_page(struct page *page)
 293{
 294        __free_pages_ok(page, compound_order(page));
 295}
 296
 297void prep_compound_page(struct page *page, unsigned long order)
 298{
 299        int i;
 300        int nr_pages = 1 << order;
 301
 302        set_compound_page_dtor(page, free_compound_page);
 303        set_compound_order(page, order);
 304        __SetPageHead(page);
 305        for (i = 1; i < nr_pages; i++) {
 306                struct page *p = page + i;
 307
 308                __SetPageTail(p);
 309                p->first_page = page;
 310        }
 311}
 312
 313static int destroy_compound_page(struct page *page, unsigned long order)
 314{
 315        int i;
 316        int nr_pages = 1 << order;
 317        int bad = 0;
 318
 319        if (unlikely(compound_order(page) != order) ||
 320            unlikely(!PageHead(page))) {
 321                bad_page(page);
 322                bad++;
 323        }
 324
 325        __ClearPageHead(page);
 326
 327        for (i = 1; i < nr_pages; i++) {
 328                struct page *p = page + i;
 329
 330                if (unlikely(!PageTail(p) || (p->first_page != page))) {
 331                        bad_page(page);
 332                        bad++;
 333                }
 334                __ClearPageTail(p);
 335        }
 336
 337        return bad;
 338}
 339
 340static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 341{
 342        int i;
 343
 344        /*
 345         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 346         * and __GFP_HIGHMEM from hard or soft interrupt context.
 347         */
 348        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 349        for (i = 0; i < (1 << order); i++)
 350                clear_highpage(page + i);
 351}
 352
 353static inline void set_page_order(struct page *page, int order)
 354{
 355        set_page_private(page, order);
 356        __SetPageBuddy(page);
 357}
 358
 359static inline void rmv_page_order(struct page *page)
 360{
 361        __ClearPageBuddy(page);
 362        set_page_private(page, 0);
 363}
 364
 365/*
 366 * Locate the struct page for both the matching buddy in our
  367 * pair (buddy1) and the combined order O+1 page they form (page).
 368 *
 369 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 370 * the following equation:
 371 *     B2 = B1 ^ (1 << O)
  372 * For example, if the starting buddy (buddy1) is #8 its order
 373 * 1 buddy is #10:
 374 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 375 *
 376 * 2) Any buddy B will have an order O+1 parent P which
 377 * satisfies the following equation:
 378 *     P = B & ~(1 << O)
 379 *
 380 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 381 */
 382static inline struct page *
 383__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
 384{
 385        unsigned long buddy_idx = page_idx ^ (1 << order);
 386
 387        return page + (buddy_idx - page_idx);
 388}
 389
 390static inline unsigned long
 391__find_combined_index(unsigned long page_idx, unsigned int order)
 392{
 393        return (page_idx & ~(1 << order));
 394}
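
/*
 * Worked example of the two helpers above: with page_idx 12 and order 2,
 * __page_find_buddy() yields buddy_idx = 12 ^ (1 << 2) = 8, and once the
 * pair merges __find_combined_index() gives 12 & ~(1 << 2) = 8, i.e. the
 * combined order-3 block starts at index 8.
 */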
 395
 396/*
  397 * This function checks whether a page is free && is the buddy
  398 * we can coalesce with. A page and its buddy can be coalesced if
 399 * (a) the buddy is not in a hole &&
 400 * (b) the buddy is in the buddy system &&
 401 * (c) a page and its buddy have the same order &&
 402 * (d) a page and its buddy are in the same zone.
 403 *
 404 * For recording whether a page is in the buddy system, we use PG_buddy.
 405 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 406 *
 407 * For recording page's order, we use page_private(page).
 408 */
 409static inline int page_is_buddy(struct page *page, struct page *buddy,
 410                                                                int order)
 411{
 412        if (!pfn_valid_within(page_to_pfn(buddy)))
 413                return 0;
 414
 415        if (page_zone_id(page) != page_zone_id(buddy))
 416                return 0;
 417
 418        if (PageBuddy(buddy) && page_order(buddy) == order) {
 419                VM_BUG_ON(page_count(buddy) != 0);
 420                return 1;
 421        }
 422        return 0;
 423}
 424
 425/*
 426 * Freeing function for a buddy system allocator.
 427 *
 428 * The concept of a buddy system is to maintain direct-mapped table
 429 * (containing bit values) for memory blocks of various "orders".
 430 * The bottom level table contains the map for the smallest allocatable
 431 * units of memory (here, pages), and each level above it describes
 432 * pairs of units from the levels below, hence, "buddies".
 433 * At a high level, all that happens here is marking the table entry
 434 * at the bottom level available, and propagating the changes upward
 435 * as necessary, plus some accounting needed to play nicely with other
 436 * parts of the VM system.
 437 * At each level, we keep a list of pages, which are heads of continuous
 438 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 439 * order is recorded in page_private(page) field.
 440 * So when we are allocating or freeing one, we can derive the state of the
  441 * other.  That is, if we allocate a small block, and both were
  442 * free, the remainder of the region must be split into blocks.
  443 * If a block is freed, and its buddy is also free, then this
  444 * triggers coalescing into a block of larger size.
 445 *
 446 * -- wli
 447 */
 448
 449static inline void __free_one_page(struct page *page,
 450                struct zone *zone, unsigned int order,
 451                int migratetype)
 452{
 453        unsigned long page_idx;
 454
 455        if (unlikely(PageCompound(page)))
 456                if (unlikely(destroy_compound_page(page, order)))
 457                        return;
 458
 459        VM_BUG_ON(migratetype == -1);
 460
 461        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 462
 463        VM_BUG_ON(page_idx & ((1 << order) - 1));
 464        VM_BUG_ON(bad_range(zone, page));
 465
 466        while (order < MAX_ORDER-1) {
 467                unsigned long combined_idx;
 468                struct page *buddy;
 469
 470                buddy = __page_find_buddy(page, page_idx, order);
 471                if (!page_is_buddy(page, buddy, order))
 472                        break;
 473
 474                /* Our buddy is free, merge with it and move up one order. */
 475                list_del(&buddy->lru);
 476                zone->free_area[order].nr_free--;
 477                rmv_page_order(buddy);
 478                combined_idx = __find_combined_index(page_idx, order);
 479                page = page + (combined_idx - page_idx);
 480                page_idx = combined_idx;
 481                order++;
 482        }
 483        set_page_order(page, order);
 484        list_add(&page->lru,
 485                &zone->free_area[order].free_list[migratetype]);
 486        zone->free_area[order].nr_free++;
 487}
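
/*
 * Example of the merge loop above: freeing an order-0 page at index 9 while
 * its buddy at index 8 is already free produces an order-1 block at index 8;
 * if the order-1 buddy at index 10 is free as well, that becomes an order-2
 * block at index 8, and so on up to MAX_ORDER-1.
 */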
 488
 489#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 490/*
  491 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 492 * Page should not be on lru, so no need to fix that up.
 493 * free_pages_check() will verify...
 494 */
 495static inline void free_page_mlock(struct page *page)
 496{
 497        __dec_zone_page_state(page, NR_MLOCK);
 498        __count_vm_event(UNEVICTABLE_MLOCKFREED);
 499}
 500#else
 501static void free_page_mlock(struct page *page) { }
 502#endif
 503
 504static inline int free_pages_check(struct page *page)
 505{
 506        if (unlikely(page_mapcount(page) |
 507                (page->mapping != NULL)  |
 508                (atomic_read(&page->_count) != 0) |
 509                (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
 510                bad_page(page);
 511                return 1;
 512        }
 513        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 514                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 515        return 0;
 516}
 517
 518/*
 519 * Frees a number of pages from the PCP lists
 520 * Assumes all pages on list are in same zone, and of same order.
 521 * count is the number of pages to free.
 522 *
 523 * If the zone was previously in an "all pages pinned" state then look to
 524 * see if this freeing clears that state.
 525 *
 526 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 527 * pinned" detection logic.
 528 */
 529static void free_pcppages_bulk(struct zone *zone, int count,
 530                                        struct per_cpu_pages *pcp)
 531{
 532        int migratetype = 0;
 533        int batch_free = 0;
 534
 535        spin_lock(&zone->lock);
 536        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 537        zone->pages_scanned = 0;
 538
 539        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
 540        while (count) {
 541                struct page *page;
 542                struct list_head *list;
 543
 544                /*
 545                 * Remove pages from lists in a round-robin fashion. A
 546                 * batch_free count is maintained that is incremented when an
 547                 * empty list is encountered.  This is so more pages are freed
 548                 * off fuller lists instead of spinning excessively around empty
 549                 * lists
 550                 */
 551                do {
 552                        batch_free++;
 553                        if (++migratetype == MIGRATE_PCPTYPES)
 554                                migratetype = 0;
 555                        list = &pcp->lists[migratetype];
 556                } while (list_empty(list));
 557
 558                do {
 559                        page = list_entry(list->prev, struct page, lru);
  560                        /* must delete as __free_one_page manipulates the list */
 561                        list_del(&page->lru);
 562                        __free_one_page(page, zone, 0, migratetype);
 563                        trace_mm_page_pcpu_drain(page, 0, migratetype);
 564                } while (--count && --batch_free && !list_empty(list));
 565        }
 566        spin_unlock(&zone->lock);
 567}
 568
 569static void free_one_page(struct zone *zone, struct page *page, int order,
 570                                int migratetype)
 571{
 572        spin_lock(&zone->lock);
 573        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 574        zone->pages_scanned = 0;
 575
 576        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 577        __free_one_page(page, zone, order, migratetype);
 578        spin_unlock(&zone->lock);
 579}
 580
 581static void __free_pages_ok(struct page *page, unsigned int order)
 582{
 583        unsigned long flags;
 584        int i;
 585        int bad = 0;
 586        int wasMlocked = __TestClearPageMlocked(page);
 587
 588        kmemcheck_free_shadow(page, order);
 589
 590        for (i = 0 ; i < (1 << order) ; ++i)
 591                bad += free_pages_check(page + i);
 592        if (bad)
 593                return;
 594
 595        if (!PageHighMem(page)) {
 596                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
 597                debug_check_no_obj_freed(page_address(page),
 598                                           PAGE_SIZE << order);
 599        }
 600        arch_free_page(page, order);
 601        kernel_map_pages(page, 1 << order, 0);
 602
 603        local_irq_save(flags);
 604        if (unlikely(wasMlocked))
 605                free_page_mlock(page);
 606        __count_vm_events(PGFREE, 1 << order);
 607        free_one_page(page_zone(page), page, order,
 608                                        get_pageblock_migratetype(page));
 609        local_irq_restore(flags);
 610}
 611
 612/*
 613 * permit the bootmem allocator to evade page validation on high-order frees
 614 */
 615void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 616{
 617        if (order == 0) {
 618                __ClearPageReserved(page);
 619                set_page_count(page, 0);
 620                set_page_refcounted(page);
 621                __free_page(page);
 622        } else {
 623                int loop;
 624
 625                prefetchw(page);
 626                for (loop = 0; loop < BITS_PER_LONG; loop++) {
 627                        struct page *p = &page[loop];
 628
 629                        if (loop + 1 < BITS_PER_LONG)
 630                                prefetchw(p + 1);
 631                        __ClearPageReserved(p);
 632                        set_page_count(p, 0);
 633                }
 634
 635                set_page_refcounted(page);
 636                __free_pages(page, order);
 637        }
 638}
 639
 640
 641/*
 642 * The order of subdivision here is critical for the IO subsystem.
 643 * Please do not alter this order without good reasons and regression
 644 * testing. Specifically, as large blocks of memory are subdivided,
 645 * the order in which smaller blocks are delivered depends on the order
 646 * they're subdivided in this function. This is the primary factor
 647 * influencing the order in which pages are delivered to the IO
 648 * subsystem according to empirical testing, and this is also justified
 649 * by considering the behavior of a buddy system containing a single
 650 * large block of memory acted on by a series of small allocations.
 651 * This behavior is a critical factor in sglist merging's success.
 652 *
 653 * -- wli
 654 */
 655static inline void expand(struct zone *zone, struct page *page,
 656        int low, int high, struct free_area *area,
 657        int migratetype)
 658{
 659        unsigned long size = 1 << high;
 660
 661        while (high > low) {
 662                area--;
 663                high--;
 664                size >>= 1;
 665                VM_BUG_ON(bad_range(zone, &page[size]));
 666                list_add(&page[size].lru, &area->free_list[migratetype]);
 667                area->nr_free++;
 668                set_page_order(&page[size], high);
 669        }
 670}
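
/*
 * Example of the split above: satisfying an order-0 request from an order-3
 * free block returns the first page and hands the remainder back as one
 * order-2 block (pages 4-7), one order-1 block (pages 2-3) and one order-0
 * page (page 1), each tagged with set_page_order().
 */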
 671
 672/*
 673 * This page is about to be returned from the page allocator
 674 */
 675static inline int check_new_page(struct page *page)
 676{
 677        if (unlikely(page_mapcount(page) |
 678                (page->mapping != NULL)  |
 679                (atomic_read(&page->_count) != 0)  |
 680                (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
 681                bad_page(page);
 682                return 1;
 683        }
 684        return 0;
 685}
 686
 687static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 688{
 689        int i;
 690
 691        for (i = 0; i < (1 << order); i++) {
 692                struct page *p = page + i;
 693                if (unlikely(check_new_page(p)))
 694                        return 1;
 695        }
 696
 697        set_page_private(page, 0);
 698        set_page_refcounted(page);
 699
 700        arch_alloc_page(page, order);
 701        kernel_map_pages(page, 1 << order, 1);
 702
 703        if (gfp_flags & __GFP_ZERO)
 704                prep_zero_page(page, order, gfp_flags);
 705
 706        if (order && (gfp_flags & __GFP_COMP))
 707                prep_compound_page(page, order);
 708
 709        return 0;
 710}
 711
 712/*
 713 * Go through the free lists for the given migratetype and remove
 714 * the smallest available page from the freelists
 715 */
 716static inline
 717struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 718                                                int migratetype)
 719{
 720        unsigned int current_order;
 721        struct free_area * area;
 722        struct page *page;
 723
 724        /* Find a page of the appropriate size in the preferred list */
 725        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 726                area = &(zone->free_area[current_order]);
 727                if (list_empty(&area->free_list[migratetype]))
 728                        continue;
 729
 730                page = list_entry(area->free_list[migratetype].next,
 731                                                        struct page, lru);
 732                list_del(&page->lru);
 733                rmv_page_order(page);
 734                area->nr_free--;
 735                expand(zone, page, order, current_order, area, migratetype);
 736                return page;
 737        }
 738
 739        return NULL;
 740}
 741
 742
 743/*
  744 * This array describes the order in which free lists are fallen back
  745 * to when the free lists for the desired migrate type are depleted
 746 */
 747static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
 748        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 749        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 750        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 751        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
 752};
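
/*
 * For example, a MIGRATE_UNMOVABLE request whose own free lists are empty
 * tries MIGRATE_RECLAIMABLE and then MIGRATE_MOVABLE; MIGRATE_RESERVE is
 * skipped here and only used as a last resort by __rmqueue().
 */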
 753
 754/*
 755 * Move the free pages in a range to the free lists of the requested type.
  756 * Note that start_page and end_page are not aligned on a pageblock
 757 * boundary. If alignment is required, use move_freepages_block()
 758 */
 759static int move_freepages(struct zone *zone,
 760                          struct page *start_page, struct page *end_page,
 761                          int migratetype)
 762{
 763        struct page *page;
 764        unsigned long order;
 765        int pages_moved = 0;
 766
 767#ifndef CONFIG_HOLES_IN_ZONE
 768        /*
 769         * page_zone is not safe to call in this context when
 770         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 771         * anyway as we check zone boundaries in move_freepages_block().
 772         * Remove at a later date when no bug reports exist related to
 773         * grouping pages by mobility
 774         */
 775        BUG_ON(page_zone(start_page) != page_zone(end_page));
 776#endif
 777
 778        for (page = start_page; page <= end_page;) {
 779                /* Make sure we are not inadvertently changing nodes */
 780                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
 781
 782                if (!pfn_valid_within(page_to_pfn(page))) {
 783                        page++;
 784                        continue;
 785                }
 786
 787                if (!PageBuddy(page)) {
 788                        page++;
 789                        continue;
 790                }
 791
 792                order = page_order(page);
 793                list_del(&page->lru);
 794                list_add(&page->lru,
 795                        &zone->free_area[order].free_list[migratetype]);
 796                page += 1 << order;
 797                pages_moved += 1 << order;
 798        }
 799
 800        return pages_moved;
 801}
 802
 803static int move_freepages_block(struct zone *zone, struct page *page,
 804                                int migratetype)
 805{
 806        unsigned long start_pfn, end_pfn;
 807        struct page *start_page, *end_page;
 808
 809        start_pfn = page_to_pfn(page);
 810        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
 811        start_page = pfn_to_page(start_pfn);
 812        end_page = start_page + pageblock_nr_pages - 1;
 813        end_pfn = start_pfn + pageblock_nr_pages - 1;
 814
 815        /* Do not cross zone boundaries */
 816        if (start_pfn < zone->zone_start_pfn)
 817                start_page = page;
 818        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
 819                return 0;
 820
 821        return move_freepages(zone, start_page, end_page, migratetype);
 822}
 823
 824static void change_pageblock_range(struct page *pageblock_page,
 825                                        int start_order, int migratetype)
 826{
 827        int nr_pageblocks = 1 << (start_order - pageblock_order);
 828
 829        while (nr_pageblocks--) {
 830                set_pageblock_migratetype(pageblock_page, migratetype);
 831                pageblock_page += pageblock_nr_pages;
 832        }
 833}
 834
 835/* Remove an element from the buddy allocator from the fallback list */
 836static inline struct page *
 837__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 838{
 839        struct free_area * area;
 840        int current_order;
 841        struct page *page;
 842        int migratetype, i;
 843
 844        /* Find the largest possible block of pages in the other list */
 845        for (current_order = MAX_ORDER-1; current_order >= order;
 846                                                --current_order) {
 847                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 848                        migratetype = fallbacks[start_migratetype][i];
 849
 850                        /* MIGRATE_RESERVE handled later if necessary */
 851                        if (migratetype == MIGRATE_RESERVE)
 852                                continue;
 853
 854                        area = &(zone->free_area[current_order]);
 855                        if (list_empty(&area->free_list[migratetype]))
 856                                continue;
 857
 858                        page = list_entry(area->free_list[migratetype].next,
 859                                        struct page, lru);
 860                        area->nr_free--;
 861
 862                        /*
 863                         * If breaking a large block of pages, move all free
 864                         * pages to the preferred allocation list. If falling
 865                         * back for a reclaimable kernel allocation, be more
  866                         * aggressive about taking ownership of free pages
 867                         */
 868                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
 869                                        start_migratetype == MIGRATE_RECLAIMABLE ||
 870                                        page_group_by_mobility_disabled) {
 871                                unsigned long pages;
 872                                pages = move_freepages_block(zone, page,
 873                                                                start_migratetype);
 874
 875                                /* Claim the whole block if over half of it is free */
 876                                if (pages >= (1 << (pageblock_order-1)) ||
 877                                                page_group_by_mobility_disabled)
 878                                        set_pageblock_migratetype(page,
 879                                                                start_migratetype);
 880
 881                                migratetype = start_migratetype;
 882                        }
 883
 884                        /* Remove the page from the freelists */
 885                        list_del(&page->lru);
 886                        rmv_page_order(page);
 887
 888                        /* Take ownership for orders >= pageblock_order */
 889                        if (current_order >= pageblock_order)
 890                                change_pageblock_range(page, current_order,
 891                                                        start_migratetype);
 892
 893                        expand(zone, page, order, current_order, area, migratetype);
 894
 895                        trace_mm_page_alloc_extfrag(page, order, current_order,
 896                                start_migratetype, migratetype);
 897
 898                        return page;
 899                }
 900        }
 901
 902        return NULL;
 903}
 904
 905/*
 906 * Do the hard work of removing an element from the buddy allocator.
 907 * Call me with the zone->lock already held.
 908 */
 909static struct page *__rmqueue(struct zone *zone, unsigned int order,
 910                                                int migratetype)
 911{
 912        struct page *page;
 913
 914retry_reserve:
 915        page = __rmqueue_smallest(zone, order, migratetype);
 916
 917        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
 918                page = __rmqueue_fallback(zone, order, migratetype);
 919
 920                /*
 921                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
 922                 * is used because __rmqueue_smallest is an inline function
 923                 * and we want just one call site
 924                 */
 925                if (!page) {
 926                        migratetype = MIGRATE_RESERVE;
 927                        goto retry_reserve;
 928                }
 929        }
 930
 931        trace_mm_page_alloc_zone_locked(page, order, migratetype);
 932        return page;
 933}
 934
 935/* 
 936 * Obtain a specified number of elements from the buddy allocator, all under
 937 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 938 * Returns the number of new pages which were placed at *list.
 939 */
 940static int rmqueue_bulk(struct zone *zone, unsigned int order, 
 941                        unsigned long count, struct list_head *list,
 942                        int migratetype, int cold)
 943{
 944        int i;
 945        
 946        spin_lock(&zone->lock);
 947        for (i = 0; i < count; ++i) {
 948                struct page *page = __rmqueue(zone, order, migratetype);
 949                if (unlikely(page == NULL))
 950                        break;
 951
 952                /*
 953                 * Split buddy pages returned by expand() are received here
  954                 * in physical page order. The page is added to the caller's
  955                 * list and the list head then moves forward. From the caller's
 956                 * perspective, the linked list is ordered by page number in
 957                 * some conditions. This is useful for IO devices that can
 958                 * merge IO requests if the physical pages are ordered
 959                 * properly.
 960                 */
 961                if (likely(cold == 0))
 962                        list_add(&page->lru, list);
 963                else
 964                        list_add_tail(&page->lru, list);
 965                set_page_private(page, migratetype);
 966                list = &page->lru;
 967        }
 968        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 969        spin_unlock(&zone->lock);
 970        return i;
 971}
 972
 973#ifdef CONFIG_NUMA
 974/*
 975 * Called from the vmstat counter updater to drain pagesets of this
 976 * currently executing processor on remote nodes after they have
 977 * expired.
 978 *
 979 * Note that this function must be called with the thread pinned to
 980 * a single processor.
 981 */
 982void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 983{
 984        unsigned long flags;
 985        int to_drain;
 986
 987        local_irq_save(flags);
 988        if (pcp->count >= pcp->batch)
 989                to_drain = pcp->batch;
 990        else
 991                to_drain = pcp->count;
 992        free_pcppages_bulk(zone, to_drain, pcp);
 993        pcp->count -= to_drain;
 994        local_irq_restore(flags);
 995}
 996#endif
 997
 998/*
 999 * Drain pages of the indicated processor.
1000 *
1001 * The processor must either be the current processor and the
1002 * thread pinned to the current processor or a processor that
1003 * is not online.
1004 */
1005static void drain_pages(unsigned int cpu)
1006{
1007        unsigned long flags;
1008        struct zone *zone;
1009
1010        for_each_populated_zone(zone) {
1011                struct per_cpu_pageset *pset;
1012                struct per_cpu_pages *pcp;
1013
1014                pset = zone_pcp(zone, cpu);
1015
1016                pcp = &pset->pcp;
1017                local_irq_save(flags);
1018                free_pcppages_bulk(zone, pcp->count, pcp);
1019                pcp->count = 0;
1020                local_irq_restore(flags);
1021        }
1022}
1023
1024/*
1025 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1026 */
1027void drain_local_pages(void *arg)
1028{
1029        drain_pages(smp_processor_id());
1030}
1031
1032/*
1033 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1034 */
1035void drain_all_pages(void)
1036{
1037        on_each_cpu(drain_local_pages, NULL, 1);
1038}
1039
1040#ifdef CONFIG_HIBERNATION
1041
1042void mark_free_pages(struct zone *zone)
1043{
1044        unsigned long pfn, max_zone_pfn;
1045        unsigned long flags;
1046        int order, t;
1047        struct list_head *curr;
1048
1049        if (!zone->spanned_pages)
1050                return;
1051
1052        spin_lock_irqsave(&zone->lock, flags);
1053
1054        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1055        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1056                if (pfn_valid(pfn)) {
1057                        struct page *page = pfn_to_page(pfn);
1058
1059                        if (!swsusp_page_is_forbidden(page))
1060                                swsusp_unset_page_free(page);
1061                }
1062
1063        for_each_migratetype_order(order, t) {
1064                list_for_each(curr, &zone->free_area[order].free_list[t]) {
1065                        unsigned long i;
1066
1067                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
1068                        for (i = 0; i < (1UL << order); i++)
1069                                swsusp_set_page_free(pfn_to_page(pfn + i));
1070                }
1071        }
1072        spin_unlock_irqrestore(&zone->lock, flags);
1073}
 1074#endif /* CONFIG_HIBERNATION */
1075
1076/*
1077 * Free a 0-order page
1078 */
1079static void free_hot_cold_page(struct page *page, int cold)
1080{
1081        struct zone *zone = page_zone(page);
1082        struct per_cpu_pages *pcp;
1083        unsigned long flags;
1084        int migratetype;
1085        int wasMlocked = __TestClearPageMlocked(page);
1086
1087        kmemcheck_free_shadow(page, 0);
1088
1089        if (PageAnon(page))
1090                page->mapping = NULL;
1091        if (free_pages_check(page))
1092                return;
1093
1094        if (!PageHighMem(page)) {
1095                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1096                debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1097        }
1098        arch_free_page(page, 0);
1099        kernel_map_pages(page, 1, 0);
1100
1101        pcp = &zone_pcp(zone, get_cpu())->pcp;
1102        migratetype = get_pageblock_migratetype(page);
1103        set_page_private(page, migratetype);
1104        local_irq_save(flags);
1105        if (unlikely(wasMlocked))
1106                free_page_mlock(page);
1107        __count_vm_event(PGFREE);
1108
1109        /*
1110         * We only track unmovable, reclaimable and movable on pcp lists.
1111         * Free ISOLATE pages back to the allocator because they are being
1112         * offlined but treat RESERVE as movable pages so we can get those
1113         * areas back if necessary. Otherwise, we may have to free
1114         * excessively into the page allocator
1115         */
1116        if (migratetype >= MIGRATE_PCPTYPES) {
1117                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1118                        free_one_page(zone, page, 0, migratetype);
1119                        goto out;
1120                }
1121                migratetype = MIGRATE_MOVABLE;
1122        }
1123
1124        if (cold)
1125                list_add_tail(&page->lru, &pcp->lists[migratetype]);
1126        else
1127                list_add(&page->lru, &pcp->lists[migratetype]);
1128        pcp->count++;
1129        if (pcp->count >= pcp->high) {
1130                free_pcppages_bulk(zone, pcp->batch, pcp);
1131                pcp->count -= pcp->batch;
1132        }
1133
1134out:
1135        local_irq_restore(flags);
1136        put_cpu();
1137}
1138
1139void free_hot_page(struct page *page)
1140{
1141        trace_mm_page_free_direct(page, 0);
1142        free_hot_cold_page(page, 0);
1143}
1144        
1145/*
1146 * split_page takes a non-compound higher-order page, and splits it into
 1147 * n (1<<order) sub-pages: page[0..n-1]
1148 * Each sub-page must be freed individually.
1149 *
1150 * Note: this is probably too low level an operation for use in drivers.
1151 * Please consult with lkml before using this in your driver.
1152 */
1153void split_page(struct page *page, unsigned int order)
1154{
1155        int i;
1156
1157        VM_BUG_ON(PageCompound(page));
1158        VM_BUG_ON(!page_count(page));
1159
1160#ifdef CONFIG_KMEMCHECK
1161        /*
1162         * Split shadow pages too, because free(page[0]) would
1163         * otherwise free the whole shadow.
1164         */
1165        if (kmemcheck_page_is_tracked(page))
1166                split_page(virt_to_page(page[0].shadow), order);
1167#endif
1168
1169        for (i = 1; i < (1 << order); i++)
1170                set_page_refcounted(page + i);
1171}
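
/*
 * Usage sketch (illustrative only): a caller holding a non-compound
 * higher-order page can hand the pieces out individually, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * As the comment above says, consult lkml before doing this from a driver.
 */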
1172
1173/*
1174 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1175 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1176 * or two.
1177 */
1178static inline
1179struct page *buffered_rmqueue(struct zone *preferred_zone,
1180                        struct zone *zone, int order, gfp_t gfp_flags,
1181                        int migratetype)
1182{
1183        unsigned long flags;
1184        struct page *page;
1185        int cold = !!(gfp_flags & __GFP_COLD);
1186        int cpu;
1187
1188again:
1189        cpu  = get_cpu();
1190        if (likely(order == 0)) {
1191                struct per_cpu_pages *pcp;
1192                struct list_head *list;
1193
1194                pcp = &zone_pcp(zone, cpu)->pcp;
1195                list = &pcp->lists[migratetype];
1196                local_irq_save(flags);
1197                if (list_empty(list)) {
1198                        pcp->count += rmqueue_bulk(zone, 0,
1199                                        pcp->batch, list,
1200                                        migratetype, cold);
1201                        if (unlikely(list_empty(list)))
1202                                goto failed;
1203                }
1204
1205                if (cold)
1206                        page = list_entry(list->prev, struct page, lru);
1207                else
1208                        page = list_entry(list->next, struct page, lru);
1209
1210                list_del(&page->lru);
1211                pcp->count--;
1212        } else {
1213                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1214                        /*
1215                         * __GFP_NOFAIL is not to be used in new code.
1216                         *
1217                         * All __GFP_NOFAIL callers should be fixed so that they
1218                         * properly detect and handle allocation failures.
1219                         *
1220                         * We most definitely don't want callers attempting to
1221                         * allocate greater than order-1 page units with
1222                         * __GFP_NOFAIL.
1223                         */
1224                        WARN_ON_ONCE(order > 1);
1225                }
1226                spin_lock_irqsave(&zone->lock, flags);
1227                page = __rmqueue(zone, order, migratetype);
1228                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1229                spin_unlock(&zone->lock);
1230                if (!page)
1231                        goto failed;
1232        }
1233
1234        __count_zone_vm_events(PGALLOC, zone, 1 << order);
1235        zone_statistics(preferred_zone, zone);
1236        local_irq_restore(flags);
1237        put_cpu();
1238
1239        VM_BUG_ON(bad_range(zone, page));
1240        if (prep_new_page(page, order, gfp_flags))
1241                goto again;
1242        return page;
1243
1244failed:
1245        local_irq_restore(flags);
1246        put_cpu();
1247        return NULL;
1248}
1249
1250/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1251#define ALLOC_WMARK_MIN         WMARK_MIN
1252#define ALLOC_WMARK_LOW         WMARK_LOW
1253#define ALLOC_WMARK_HIGH        WMARK_HIGH
1254#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
1255
1256/* Mask to get the watermark bits */
1257#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
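/*
 * i.e. 0x03: the low two bits of alloc_flags select WMARK_MIN, WMARK_LOW or
 * WMARK_HIGH as the index into zone->watermark[] in get_page_from_freelist().
 */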
1258
1259#define ALLOC_HARDER            0x10 /* try to alloc harder */
1260#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
1261#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
1262
1263#ifdef CONFIG_FAIL_PAGE_ALLOC
1264
1265static struct fail_page_alloc_attr {
1266        struct fault_attr attr;
1267
1268        u32 ignore_gfp_highmem;
1269        u32 ignore_gfp_wait;
1270        u32 min_order;
1271
1272#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1273
1274        struct dentry *ignore_gfp_highmem_file;
1275        struct dentry *ignore_gfp_wait_file;
1276        struct dentry *min_order_file;
1277
1278#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1279
1280} fail_page_alloc = {
1281        .attr = FAULT_ATTR_INITIALIZER,
1282        .ignore_gfp_wait = 1,
1283        .ignore_gfp_highmem = 1,
1284        .min_order = 1,
1285};
1286
1287static int __init setup_fail_page_alloc(char *str)
1288{
1289        return setup_fault_attr(&fail_page_alloc.attr, str);
1290}
1291__setup("fail_page_alloc=", setup_fail_page_alloc);
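
/*
 * The boot parameter takes the generic fault_attr form documented in
 * Documentation/fault-injection/fault-injection.txt, typically
 * fail_page_alloc=<interval>,<probability>,<space>,<times>; the remaining
 * knobs are exposed through debugfs below when
 * CONFIG_FAULT_INJECTION_DEBUG_FS is set.
 */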
1292
1293static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1294{
1295        if (order < fail_page_alloc.min_order)
1296                return 0;
1297        if (gfp_mask & __GFP_NOFAIL)
1298                return 0;
1299        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1300                return 0;
1301        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1302                return 0;
1303
1304        return should_fail(&fail_page_alloc.attr, 1 << order);
1305}
1306
1307#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1308
1309static int __init fail_page_alloc_debugfs(void)
1310{
1311        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1312        struct dentry *dir;
1313        int err;
1314
1315        err = init_fault_attr_dentries(&fail_page_alloc.attr,
1316                                       "fail_page_alloc");
1317        if (err)
1318                return err;
1319        dir = fail_page_alloc.attr.dentries.dir;
1320
1321        fail_page_alloc.ignore_gfp_wait_file =
1322                debugfs_create_bool("ignore-gfp-wait", mode, dir,
1323                                      &fail_page_alloc.ignore_gfp_wait);
1324
1325        fail_page_alloc.ignore_gfp_highmem_file =
1326                debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1327                                      &fail_page_alloc.ignore_gfp_highmem);
1328        fail_page_alloc.min_order_file =
1329                debugfs_create_u32("min-order", mode, dir,
1330                                   &fail_page_alloc.min_order);
1331
1332        if (!fail_page_alloc.ignore_gfp_wait_file ||
1333            !fail_page_alloc.ignore_gfp_highmem_file ||
1334            !fail_page_alloc.min_order_file) {
1335                err = -ENOMEM;
1336                debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1337                debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1338                debugfs_remove(fail_page_alloc.min_order_file);
1339                cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1340        }
1341
1342        return err;
1343}
1344
1345late_initcall(fail_page_alloc_debugfs);
1346
1347#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1348
1349#else /* CONFIG_FAIL_PAGE_ALLOC */
1350
1351static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1352{
1353        return 0;
1354}
1355
1356#endif /* CONFIG_FAIL_PAGE_ALLOC */
1357
1358/*
1359 * Return 1 if free pages are above 'mark'. This takes into account the order
1360 * of the allocation.
1361 */
1362int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1363                      int classzone_idx, int alloc_flags)
1364{
 1365        /* free_pages may go negative - that's OK */
1366        long min = mark;
1367        long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1368        int o;
1369
1370        if (alloc_flags & ALLOC_HIGH)
1371                min -= min / 2;
1372        if (alloc_flags & ALLOC_HARDER)
1373                min -= min / 4;
1374
1375        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1376                return 0;
1377        for (o = 0; o < order; o++) {
1378                /* At the next order, this order's pages become unavailable */
1379                free_pages -= z->free_area[o].nr_free << o;
1380
1381                /* Require fewer higher order pages to be free */
1382                min >>= 1;
1383
1384                if (free_pages <= min)
1385                        return 0;
1386        }
1387        return 1;
1388}
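
/*
 * Worked example of the check above: for an order-1 request against a mark
 * of 1024 pages with ALLOC_HARDER set, min starts at 1024 - 1024/4 = 768, so
 * the zone needs (roughly) more than 768 + lowmem_reserve[classzone_idx]
 * free pages in total, and after discarding the order-0 free pages it must
 * still have more than 768/2 = 384 pages free in order-1 and larger blocks.
 */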
1389
1390#ifdef CONFIG_NUMA
1391/*
1392 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1393 * skip over zones that are not allowed by the cpuset, or that have
1394 * been recently (in last second) found to be nearly full.  See further
1395 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1396 * that have to skip over a lot of full or unallowed zones.
1397 *
1398 * If the zonelist cache is present in the passed in zonelist, then
1399 * returns a pointer to the allowed node mask (either the current
 1400 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1401 *
1402 * If the zonelist cache is not available for this zonelist, does
1403 * nothing and returns NULL.
1404 *
1405 * If the fullzones BITMAP in the zonelist cache is stale (more than
1406 * a second since last zap'd) then we zap it out (clear its bits.)
1407 *
1408 * We hold off even calling zlc_setup, until after we've checked the
1409 * first zone in the zonelist, on the theory that most allocations will
1410 * be satisfied from that first zone, so best to examine that zone as
1411 * quickly as we can.
1412 */
1413static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1414{
1415        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1416        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1417
1418        zlc = zonelist->zlcache_ptr;
1419        if (!zlc)
1420                return NULL;
1421
1422        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1423                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1424                zlc->last_full_zap = jiffies;
1425        }
1426
1427        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1428                                        &cpuset_current_mems_allowed :
1429                                        &node_states[N_HIGH_MEMORY];
1430        return allowednodes;
1431}
1432
1433/*
1434 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1435 * if it is worth looking at further for free memory:
1436 *  1) Check that the zone isn't thought to be full (doesn't have its
1437 *     bit set in the zonelist_cache fullzones BITMAP).
1438 *  2) Check that the zones node (obtained from the zonelist_cache
1439 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1440 * Return true (non-zero) if zone is worth looking at further, or
1441 * else return false (zero) if it is not.
1442 *
1443 * This check -ignores- the distinction between various watermarks,
1444 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1445 * found to be full for any variation of these watermarks, it will
1446 * be considered full for up to one second by all requests, unless
1447 * we are so low on memory on all allowed nodes that we are forced
1448 * into the second scan of the zonelist.
1449 *
1450 * In the second scan we ignore this zonelist cache and exactly
 1451 * apply the watermarks to all zones, even if it is slower to do so.
1452 * We are low on memory in the second scan, and should leave no stone
1453 * unturned looking for a free page.
1454 */
1455static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1456                                                nodemask_t *allowednodes)
1457{
1458        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1459        int i;                          /* index of *z in zonelist zones */
1460        int n;                          /* node that zone *z is on */
1461
1462        zlc = zonelist->zlcache_ptr;
1463        if (!zlc)
1464                return 1;
1465
1466        i = z - zonelist->_zonerefs;
1467        n = zlc->z_to_n[i];
1468
1469        /* This zone is worth trying if it is allowed but not full */
1470        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1471}
1472
1473/*
1474 * Given 'z' scanning a zonelist, set the corresponding bit in
1475 * zlc->fullzones, so that subsequent attempts to allocate a page
1476 * from that zone don't waste time re-examining it.
1477 */
1478static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1479{
1480        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1481        int i;                          /* index of *z in zonelist zones */
1482
1483        zlc = zonelist->zlcache_ptr;
1484        if (!zlc)
1485                return;
1486
1487        i = z - zonelist->_zonerefs;
1488
1489        set_bit(i, zlc->fullzones);
1490}
1491
1492#else   /* CONFIG_NUMA */
1493
1494static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1495{
1496        return NULL;
1497}
1498
1499static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1500                                nodemask_t *allowednodes)
1501{
1502        return 1;
1503}
1504
1505static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1506{
1507}
1508#endif  /* CONFIG_NUMA */
1509
1510/*
1511 * get_page_from_freelist goes through the zonelist trying to allocate
1512 * a page.
1513 */
1514static struct page *
1515get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1516                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1517                struct zone *preferred_zone, int migratetype)
1518{
1519        struct zoneref *z;
1520        struct page *page = NULL;
1521        int classzone_idx;
1522        struct zone *zone;
1523        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1524        int zlc_active = 0;             /* set if using zonelist_cache */
1525        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1526
1527        classzone_idx = zone_idx(preferred_zone);
1528zonelist_scan:
1529        /*
1530         * Scan zonelist, looking for a zone with enough free.
1531         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1532         */
1533        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1534                                                high_zoneidx, nodemask) {
1535                if (NUMA_BUILD && zlc_active &&
1536                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1537                                continue;
1538                if ((alloc_flags & ALLOC_CPUSET) &&
1539                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1540                                goto try_next_zone;
1541
1542                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1543                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1544                        unsigned long mark;
1545                        int ret;
1546
1547                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1548                        if (zone_watermark_ok(zone, order, mark,
1549                                    classzone_idx, alloc_flags))
1550                                goto try_this_zone;
1551
1552                        if (zone_reclaim_mode == 0)
1553                                goto this_zone_full;
1554
1555                        ret = zone_reclaim(zone, gfp_mask, order);
1556                        switch (ret) {
1557                        case ZONE_RECLAIM_NOSCAN:
1558                                /* did not scan */
1559                                goto try_next_zone;
1560                        case ZONE_RECLAIM_FULL:
1561                                /* scanned but unreclaimable */
1562                                goto this_zone_full;
1563                        default:
1564                                /* did we reclaim enough */
1565                                if (!zone_watermark_ok(zone, order, mark,
1566                                                classzone_idx, alloc_flags))
1567                                        goto this_zone_full;
1568                        }
1569                }
1570
1571try_this_zone:
1572                page = buffered_rmqueue(preferred_zone, zone, order,
1573                                                gfp_mask, migratetype);
1574                if (page)
1575                        break;
1576this_zone_full:
1577                if (NUMA_BUILD)
1578                        zlc_mark_zone_full(zonelist, z);
1579try_next_zone:
1580                if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1581                        /*
1582                         * We do zlc_setup() after the first zone is tried, but only
1583                         * if there are multiple nodes to make it worthwhile.
1584                         */
1585                        allowednodes = zlc_setup(zonelist, alloc_flags);
1586                        zlc_active = 1;
1587                        did_zlc_setup = 1;
1588                }
1589        }
1590
1591        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1592                /* Disable zlc cache for second zonelist scan */
1593                zlc_active = 0;
1594                goto zonelist_scan;
1595        }
1596        return page;
1597}
1598
1599static inline int
1600should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1601                                unsigned long pages_reclaimed)
1602{
1603        /* Do not loop if the caller specifically asked us not to retry */
1604        if (gfp_mask & __GFP_NORETRY)
1605                return 0;
1606
1607        /*
1608         * In this implementation, an order <= PAGE_ALLOC_COSTLY_ORDER
1609         * allocation is treated as if __GFP_NOFAIL were set, i.e. we keep
1610         * retrying; other implementations may not behave this way.
1611         */
1612        if (order <= PAGE_ALLOC_COSTLY_ORDER)
1613                return 1;
1614
1615        /*
1616         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1617         * specified, then we retry until we no longer reclaim any pages
1618         * (above), or we've reclaimed an order of pages at least as
1619         * large as the allocation's order. In both cases, if the
1620         * allocation still fails, we stop retrying.
1621         */
1622        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1623                return 1;
1624
1625        /*
1626         * Don't let big-order allocations loop unless the caller
1627         * explicitly requests that.
1628         */
1629        if (gfp_mask & __GFP_NOFAIL)
1630                return 1;
1631
1632        return 0;
1633}
1634
1635static inline struct page *
1636__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1637        struct zonelist *zonelist, enum zone_type high_zoneidx,
1638        nodemask_t *nodemask, struct zone *preferred_zone,
1639        int migratetype)
1640{
1641        struct page *page;
1642
1643        /* Acquire the OOM killer lock for the zones in zonelist */
1644        if (!try_set_zone_oom(zonelist, gfp_mask)) {
1645                schedule_timeout_uninterruptible(1);
1646                return NULL;
1647        }
1648
1649        /*
1650         * Go through the zonelist yet one more time, keep very high watermark
1651         * here, this is only to catch a parallel oom killing, we must fail if
1652         * we're still under heavy pressure.
1653         */
1654        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1655                order, zonelist, high_zoneidx,
1656                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1657                preferred_zone, migratetype);
1658        if (page)
1659                goto out;
1660
1661        /* The OOM killer will not help higher order allocs */
1662        if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
1663                goto out;
1664
1665        /* Exhausted what can be done so it's blamo time */
1666        out_of_memory(zonelist, gfp_mask, order);
1667
1668out:
1669        clear_zonelist_oom(zonelist, gfp_mask);
1670        return page;
1671}
1672
1673/* The really slow allocator path where we enter direct reclaim */
1674static inline struct page *
1675__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1676        struct zonelist *zonelist, enum zone_type high_zoneidx,
1677        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1678        int migratetype, unsigned long *did_some_progress)
1679{
1680        struct page *page = NULL;
1681        struct reclaim_state reclaim_state;
1682        struct task_struct *p = current;
1683
1684        cond_resched();
1685
1686        /* We now go into synchronous reclaim */
1687        cpuset_memory_pressure_bump();
1688        p->flags |= PF_MEMALLOC;
1689        lockdep_set_current_reclaim_state(gfp_mask);
1690        reclaim_state.reclaimed_slab = 0;
1691        p->reclaim_state = &reclaim_state;
1692
1693        *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1694
1695        p->reclaim_state = NULL;
1696        lockdep_clear_current_reclaim_state();
1697        p->flags &= ~PF_MEMALLOC;
1698
1699        cond_resched();
1700
1701        if (order != 0)
1702                drain_all_pages();
1703
1704        if (likely(*did_some_progress))
1705                page = get_page_from_freelist(gfp_mask, nodemask, order,
1706                                        zonelist, high_zoneidx,
1707                                        alloc_flags, preferred_zone,
1708                                        migratetype);
1709        return page;
1710}
1711
1712/*
1713 * This is called in the allocator slow-path if the allocation request is of
1714 * sufficient urgency to ignore watermarks and take other desperate measures
1715 */
1716static inline struct page *
1717__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1718        struct zonelist *zonelist, enum zone_type high_zoneidx,
1719        nodemask_t *nodemask, struct zone *preferred_zone,
1720        int migratetype)
1721{
1722        struct page *page;
1723
1724        do {
1725                page = get_page_from_freelist(gfp_mask, nodemask, order,
1726                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1727                        preferred_zone, migratetype);
1728
1729                if (!page && gfp_mask & __GFP_NOFAIL)
1730                        congestion_wait(BLK_RW_ASYNC, HZ/50);
1731        } while (!page && (gfp_mask & __GFP_NOFAIL));
1732
1733        return page;
1734}
1735
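/*
 * Wake the kswapd daemons for every zone in the zonelist up to and including
 * high_zoneidx, so background reclaim runs while we retry the allocation.
 */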
1736static inline
1737void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1738                                                enum zone_type high_zoneidx)
1739{
1740        struct zoneref *z;
1741        struct zone *zone;
1742
1743        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1744                wakeup_kswapd(zone, order);
1745}
1746
1747static inline int
1748gfp_to_alloc_flags(gfp_t gfp_mask)
1749{
1750        struct task_struct *p = current;
1751        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1752        const gfp_t wait = gfp_mask & __GFP_WAIT;
1753
1754        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1755        BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1756
1757        /*
1758         * The caller may dip into page reserves a bit more if the caller
1759         * cannot run direct reclaim, or if the caller has realtime scheduling
1760         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1761         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1762         */
1763        alloc_flags |= (gfp_mask & __GFP_HIGH);
1764
1765        if (!wait) {
1766                alloc_flags |= ALLOC_HARDER;
1767                /*
1768                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1769                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1770                 */
1771                alloc_flags &= ~ALLOC_CPUSET;
1772        } else if (unlikely(rt_task(p)) && !in_interrupt())
1773                alloc_flags |= ALLOC_HARDER;
1774
1775        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1776                if (!in_interrupt() &&
1777                    ((p->flags & PF_MEMALLOC) ||
1778                     unlikely(test_thread_flag(TIF_MEMDIE))))
1779                        alloc_flags |= ALLOC_NO_WATERMARKS;
1780        }
1781
1782        return alloc_flags;
1783}
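/*
 * Worked example (illustrative): a GFP_ATOMIC request has __GFP_WAIT clear
 * and __GFP_HIGH set, so (barring PF_MEMALLOC or TIF_MEMDIE) it ends up with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and without ALLOC_CPUSET.
 */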
1784
1785static inline struct page *
1786__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1787        struct zonelist *zonelist, enum zone_type high_zoneidx,
1788        nodemask_t *nodemask, struct zone *preferred_zone,
1789        int migratetype)
1790{
1791        const gfp_t wait = gfp_mask & __GFP_WAIT;
1792        struct page *page = NULL;
1793        int alloc_flags;
1794        unsigned long pages_reclaimed = 0;
1795        unsigned long did_some_progress;
1796        struct task_struct *p = current;
1797
1798        /*
1799         * In the slowpath, we sanity check order to avoid ever trying to
1800         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1801         * be using allocators in order of preference for an area that is
1802         * too large.
1803         */
1804        if (order >= MAX_ORDER) {
1805                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1806                return NULL;
1807        }
1808
1809        /*
1810         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1811         * __GFP_NOWARN set) should not cause reclaim since the subsystem
1812 * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1813         * using a larger set of nodes after it has established that the
1814         * allowed per node queues are empty and that nodes are
1815         * over allocated.
1816         */
1817        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1818                goto nopage;
1819
1820restart:
1821        wake_all_kswapd(order, zonelist, high_zoneidx);
1822
1823        /*
1824         * OK, we're below the kswapd watermark and have kicked background
1825         * reclaim. Now things get more complex, so set up alloc_flags according
1826         * to how we want to proceed.
1827         */
1828        alloc_flags = gfp_to_alloc_flags(gfp_mask);
1829
1830        /* This is the last chance, in general, before the goto nopage. */
1831        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1832                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1833                        preferred_zone, migratetype);
1834        if (page)
1835                goto got_pg;
1836
1837rebalance:
1838        /* Allocate without watermarks if the context allows */
1839        if (alloc_flags & ALLOC_NO_WATERMARKS) {
1840                page = __alloc_pages_high_priority(gfp_mask, order,
1841                                zonelist, high_zoneidx, nodemask,
1842                                preferred_zone, migratetype);
1843                if (page)
1844                        goto got_pg;
1845        }
1846
1847        /* Atomic allocations - we can't balance anything */
1848        if (!wait)
1849                goto nopage;
1850
1851        /* Avoid recursion of direct reclaim */
1852        if (p->flags & PF_MEMALLOC)
1853                goto nopage;
1854
1855        /* Avoid allocations with no watermarks from looping endlessly */
1856        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
1857                goto nopage;
1858
1859        /* Try direct reclaim and then allocating */
1860        page = __alloc_pages_direct_reclaim(gfp_mask, order,
1861                                        zonelist, high_zoneidx,
1862                                        nodemask,
1863                                        alloc_flags, preferred_zone,
1864                                        migratetype, &did_some_progress);
1865        if (page)
1866                goto got_pg;
1867
1868        /*
1869         * If we failed to make any progress reclaiming, then we are
1870         * running out of options and have to consider going OOM
1871         */
1872        if (!did_some_progress) {
1873                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1874                        if (oom_killer_disabled)
1875                                goto nopage;
1876                        page = __alloc_pages_may_oom(gfp_mask, order,
1877                                        zonelist, high_zoneidx,
1878                                        nodemask, preferred_zone,
1879                                        migratetype);
1880                        if (page)
1881                                goto got_pg;
1882
1883                        /*
1884                         * The OOM killer does not trigger for high-order
1885                         * allocations without __GFP_NOFAIL, so if no progress is
1886                         * being made, there are no other options and retrying is
1887                         * unlikely to help.
1888                         */
1889                        if (order > PAGE_ALLOC_COSTLY_ORDER &&
1890                                                !(gfp_mask & __GFP_NOFAIL))
1891                                goto nopage;
1892
1893                        goto restart;
1894                }
1895        }
1896
1897        /* Check if we should retry the allocation */
1898        pages_reclaimed += did_some_progress;
1899        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
1900                /* Wait for some write requests to complete then retry */
1901                congestion_wait(BLK_RW_ASYNC, HZ/50);
1902                goto rebalance;
1903        }
1904
1905nopage:
1906        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1907                printk(KERN_WARNING "%s: page allocation failure."
1908                        " order:%d, mode:0x%x\n",
1909                        p->comm, order, gfp_mask);
1910                dump_stack();
1911                show_mem();
1912        }
1913        return page;
1914got_pg:
1915        if (kmemcheck_enabled)
1916                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
1917        return page;
1918
1919}
1920
1921/*
1922 * This is the 'heart' of the zoned buddy allocator.
1923 */
1924struct page *
1925__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
1926                        struct zonelist *zonelist, nodemask_t *nodemask)
1927{
1928        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1929        struct zone *preferred_zone;
1930        struct page *page;
1931        int migratetype = allocflags_to_migratetype(gfp_mask);
1932
1933        gfp_mask &= gfp_allowed_mask;
1934
1935        lockdep_trace_alloc(gfp_mask);
1936
1937        might_sleep_if(gfp_mask & __GFP_WAIT);
1938
1939        if (should_fail_alloc_page(gfp_mask, order))
1940                return NULL;
1941
1942        /*
1943         * Check the zones suitable for the gfp_mask contain at least one
1944         * valid zone. It's possible to have an empty zonelist as a result
1945         * of GFP_THISNODE and a memoryless node
1946         */
1947        if (unlikely(!zonelist->_zonerefs->zone))
1948                return NULL;
1949
1950        /* The preferred zone is used for statistics later */
1951        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
1952        if (!preferred_zone)
1953                return NULL;
1954
1955        /* First allocation attempt */
1956        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1957                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
1958                        preferred_zone, migratetype);
1959        if (unlikely(!page))
1960                page = __alloc_pages_slowpath(gfp_mask, order,
1961                                zonelist, high_zoneidx, nodemask,
1962                                preferred_zone, migratetype);
1963
1964        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
1965        return page;
1966}
1967EXPORT_SYMBOL(__alloc_pages_nodemask);
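/*
 * Illustrative caller (not part of this file): a driver needing four
 * physically contiguous pages would typically do
 *
 *        struct page *page = alloc_pages(GFP_KERNEL, 2);
 *        if (!page)
 *                return -ENOMEM;
 *        ...
 *        __free_pages(page, 2);
 *
 * which reaches __alloc_pages_nodemask() through the alloc_pages() wrappers.
 */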
1968
1969/*
1970 * Common helper functions.
1971 */
1972unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1973{
1974        struct page *page;
1975
1976        /*
1977         * __get_free_pages() returns a 32-bit address, which cannot represent
1978         * a highmem page
1979         */
1980        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1981
1982        page = alloc_pages(gfp_mask, order);
1983        if (!page)
1984                return 0;
1985        return (unsigned long) page_address(page);
1986}
1987EXPORT_SYMBOL(__get_free_pages);
1988
1989unsigned long get_zeroed_page(gfp_t gfp_mask)
1990{
1991        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
1992}
1993EXPORT_SYMBOL(get_zeroed_page);
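/*
 * Illustrative usage (not part of this file):
 *
 *        unsigned long buf = __get_free_pages(GFP_KERNEL, 1);  (two pages)
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        free_pages(buf, 1);
 *
 * The value returned is a kernel virtual address, which is why __GFP_HIGHMEM
 * is rejected by the VM_BUG_ON() in __get_free_pages() above.
 */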
1994
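/*
 * Free every page held in a pagevec, handing each back to the per-cpu
 * hot/cold page lists via free_hot_cold_page().
 */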
1995void __pagevec_free(struct pagevec *pvec)
1996{
1997        int i = pagevec_count(pvec);
1998
1999        while (--i >= 0) {
2000                trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2001                free_hot_cold_page(pvec->pages[i], pvec->cold);
2002        }
2003}
2004
2005void __free_pages(struct page *page, unsigned int order)
2006{
2007        if (put_page_testzero(page)) {
2008                trace_mm_page_free_direct(page, order);
2009                if (order == 0)
2010                        free_hot_page(page);
2011                else
2012                        __free_pages_ok(page, order);
2013        }
2014}
2015
2016EXPORT_SYMBOL(__free_pages);
2017
2018void free_pages(unsigned long addr, unsigned int order)
2019{
2020        if (addr != 0) {
2021                VM_BUG_ON(!virt_addr_valid((void *)addr));
2022                __free_pages(virt_to_page((void *)addr), order);
2023        }
2024}
2025
2026EXPORT_SYMBOL(free_pages);
2027
2028/**
2029 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2030 * @size: the number of bytes to allocate
2031 * @gfp_mask: GFP flags for the allocation
2032 *
2033 * This function is similar to alloc_pages(), except that it allocates the
2034 * minimum number of pages to satisfy the request.  alloc_pages() can only
2035 * allocate memory in power-of-two pages.
2036 *
2037 * This function is also limited by MAX_ORDER.
2038 *
2039 * Memory allocated by this function must be released by free_pages_exact().
2040 */
2041void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2042{
2043        unsigned int order = get_order(size);
2044        unsigned long addr;
2045
2046        addr = __get_free_pages(gfp_mask, order);
2047        if (addr) {
2048                unsigned long alloc_end = addr + (PAGE_SIZE << order);
2049                unsigned long used = addr + PAGE_ALIGN(size);
2050
2051                split_page(virt_to_page((void *)addr), order);
2052                while (used < alloc_end) {
2053                        free_page(used);
2054                        used += PAGE_SIZE;
2055                }
2056        }
2057
2058        return (void *)addr;
2059}
2060EXPORT_SYMBOL(alloc_pages_exact);
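/*
 * Illustrative usage (not part of this file): a 10KB buffer needs only three
 * 4KB pages here, instead of the four that an order-2 alloc_pages() call
 * would pin:
 *
 *        void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        free_pages_exact(buf, 10 * 1024);
 */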
2061
2062/**
2063 * free_pages_exact - release memory allocated via alloc_pages_exact()
2064 * @virt: the value returned by alloc_pages_exact.
2065 * @size: size of allocation, same value as passed to alloc_pages_exact().
2066 *
2067 * Release the memory allocated by a previous call to alloc_pages_exact.
2068 */
2069void free_pages_exact(void *virt, size_t size)
2070{
2071        unsigned long addr = (unsigned long)virt;
2072        unsigned long end = addr + PAGE_ALIGN(size);
2073
2074        while (addr < end) {
2075                free_page(addr);
2076                addr += PAGE_SIZE;
2077        }
2078}
2079EXPORT_SYMBOL(free_pages_exact);
2080
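/*
 * nr_free_zone_pages() walks the local node's zonelist and, for each zone at
 * or below the given zone index, adds up the pages present beyond the zone's
 * high watermark: a rough estimate of how much those zones can still supply
 * without forcing reclaim.
 */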
2081static unsigned int nr_free_zone_pages(int offset)
2082{
2083        struct zoneref *z;
2084        struct zone *zone;
2085
2086        /* Just pick one node, since fallback list is circular */
2087        unsigned int sum = 0;
2088
2089        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2090
2091        for_each_zone_zonelist(zone, z, zonelist, offset) {
2092                unsigned long size = zone->present_pages;
2093                unsigned long high = high_wmark_pages(zone);
2094                if (size > high)
2095                        sum += size - high;
2096        }
2097
2098        return sum;
2099}
2100
2101/*
2102 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2103 */
2104unsigned int nr_free_buffer_pages(void)
2105{
2106        return nr_free_zone_pages(gfp_zone(GFP_USER));
2107}
2108EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2109
2110/*
2111 * Amount of free RAM allocatable within all zones
2112 */
2113unsigned int nr_free_pagecache_pages(void)
2114{
2115        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2116}
2117
2118static inline void show_node(struct zone *zone)
2119{
2120        if (NUMA_BUILD)
2121                printk("Node %d ", zone_to_nid(zone));
2122}
2123
2124void si_meminfo(struct sysinfo *val)
2125{
2126        val->totalram = totalram_pages;
2127        val->sharedram = 0;
2128        val->freeram = global_page_state(NR_FREE_PAGES);
2129        val->bufferram = nr_blockdev_pages();
2130        val->totalhigh = totalhigh_pages;
2131        val->freehigh = nr_free_highpages();
2132        val->mem_unit = PAGE_SIZE;
2133}
2134
2135EXPORT_SYMBOL(si_meminfo);
2136
2137#ifdef CONFIG_NUMA
2138void si_meminfo_node(struct sysinfo *val, int nid)
2139{
2140        pg_data_t *pgdat = NODE_DATA(nid);
2141
2142        val->totalram = pgdat->node_present_pages;
2143        val->freeram = node_page_state(nid, NR_FREE_PAGES);
2144#ifdef CONFIG_HIGHMEM
2145        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2146        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2147                        NR_FREE_PAGES);
2148#else
2149        val->totalhigh = 0;
2150        val->freehigh = 0;
2151#endif
2152        val->mem_unit = PAGE_SIZE;
2153}
2154#endif
2155
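/* Convert a count of pages into the corresponding number of kilobytes. */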
2156#define K(x) ((x) << (PAGE_SHIFT-10))
2157
2158/*
2159 * Show the free-area lists (used e.g. by the SysRq show-memory handler).
2160 * For each populated zone we print its per-cpu pagesets, its watermarks and
2161 * page-state counters, and the number of free blocks of each order.
2162 */
2163void show_free_areas(void)
2164{
2165        int cpu;
2166        struct zone *zone;
2167
2168        for_each_populated_zone(zone) {
2169                show_node(zone);
2170                printk("%s per-cpu:\n", zone->name);
2171
2172                for_each_online_cpu(cpu) {
2173                        struct per_cpu_pageset *pageset;
2174
2175                        pageset = zone_pcp(zone, cpu);
2176
2177                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2178                               cpu, pageset->pcp.high,
2179                               pageset->pcp.batch, pageset->pcp.count);
2180                }
2181        }
2182
2183        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2184                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2185                " unevictable:%lu"
2186                " dirty:%lu writeback:%lu unstable:%lu\n"
2187                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2188                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2189                global_page_state(NR_ACTIVE_ANON),
2190                global_page_state(NR_INACTIVE_ANON),
2191                global_page_state(NR_ISOLATED_ANON),
2192                global_page_state(NR_ACTIVE_FILE),
2193                global_page_state(NR_INACTIVE_FILE),
2194                global_page_state(NR_ISOLATED_FILE),
2195                global_page_state(NR_UNEVICTABLE),
2196                global_page_state(NR_FILE_DIRTY),
2197                global_page_state(NR_WRITEBACK),
2198                global_page_state(NR_UNSTABLE_NFS),
2199                global_page_state(NR_FREE_PAGES),
2200                global_page_state(NR_SLAB_RECLAIMABLE),
2201                global_page_state(NR_SLAB_UNRECLAIMABLE),
2202                global_page_state(NR_FILE_MAPPED),
2203                global_page_state(NR_SHMEM),
2204                global_page_state(NR_PAGETABLE),
2205                global_page_state(NR_BOUNCE));
2206
2207        for_each_populated_zone(zone) {
2208                int i;
2209
2210                show_node(zone);
2211                printk("%s"
2212                        " free:%lukB"
2213                        " min:%lukB"
2214                        " low:%lukB"
2215                        " high:%lukB"
2216                        " active_anon:%lukB"
2217                        " inactive_anon:%lukB"
2218                        " active_file:%lukB"
2219                        " inactive_file:%lukB"
2220                        " unevictable:%lukB"
2221                        " isolated(anon):%lukB"
2222                        " isolated(file):%lukB"
2223                        " present:%lukB"
2224                        " mlocked:%lukB"
2225                        " dirty:%lukB"
2226                        " writeback:%lukB"
2227                        " mapped:%lukB"
2228                        " shmem:%lukB"
2229                        " slab_reclaimable:%lukB"
2230                        " slab_unreclaimable:%lukB"
2231                        " kernel_stack:%lukB"
2232                        " pagetables:%lukB"
2233                        " unstable:%lukB"
2234                        " bounce:%lukB"
2235                        " writeback_tmp:%lukB"
2236                        " pages_scanned:%lu"
2237                        " all_unreclaimable? %s"
2238                        "\n",
2239                        zone->name,
2240                        K(zone_page_state(zone, NR_FREE_PAGES)),
2241                        K(min_wmark_pages(zone)),
2242                        K(low_wmark_pages(zone)),
2243                        K(high_wmark_pages(zone)),
2244                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
2245                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
2246                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
2247                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
2248                        K(zone_page_state(zone, NR_UNEVICTABLE)),
2249                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
2250                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
2251                        K(zone->present_pages),
2252                        K(zone_page_state(zone, NR_MLOCK)),
2253                        K(zone_page_state(zone, NR_FILE_DIRTY)),
2254                        K(zone_page_state(zone, NR_WRITEBACK)),
2255                        K(zone_page_state(zone, NR_FILE_MAPPED)),
2256                        K(zone_page_state(zone, NR_SHMEM)),
2257                        K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2258                        K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2259                        zone_page_state(zone, NR_KERNEL_STACK) *
2260                                THREAD_SIZE / 1024,
2261                        K(zone_page_state(zone, NR_PAGETABLE)),
2262                        K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2263                        K(zone_page_state(zone, NR_BOUNCE)),
2264                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2265                        zone->pages_scanned,
2266                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
2267                        );
2268                printk("lowmem_reserve[]:");
2269                for (i = 0; i < MAX_NR_ZONES; i++)
2270                        printk(" %lu", zone->lowmem_reserve[i]);
2271                printk("\n");
2272        }
2273
2274        for_each_populated_zone(zone) {
2275                unsigned long nr[MAX_ORDER], flags, order, total = 0;
2276
2277                show_node(zone);
2278                printk("%s: ", zone->name);
2279
2280                spin_lock_irqsave(&zone->lock, flags);
2281                for (order = 0; order < MAX_ORDER; order++) {
2282                        nr[order] = zone->free_area[order].nr_free;
2283                        total += nr[order] << order;
2284                }
2285                spin_unlock_irqrestore(&zone->lock, flags);
2286                for (order = 0; order < MAX_ORDER; order++)
2287                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
2288                printk("= %lukB\n", K(total));
2289        }
2290
2291        printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2292
2293        show_swap_cache_info();
2294}
2295
2296static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2297{
2298        zoneref->zone = zone;
2299        zoneref->zone_idx = zone_idx(zone);
2300}
2301
2302/*
2303 * Builds allocation fallback zone lists.
2304 *
2305 * Add all populated zones of a node to the zonelist.
2306 */
2307static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2308                                int nr_zones, enum zone_type zone_type)
2309{
2310        struct zone *zone;
2311
2312        BUG_ON(zone_type >= MAX_NR_ZONES);
2313        zone_type++;
2314
2315        do {
2316                zone_type--;
2317                zone = pgdat->node_zones + zone_type;
2318                if (populated_zone(zone)) {
2319                        zoneref_set_zone(zone,
2320                                &zonelist->_zonerefs[nr_zones++]);
2321                        check_highest_zone(zone_type);
2322                }
2323
2324        } while (zone_type);
2325        return nr_zones;
2326}
2327
2328
2329/*
2330 *  zonelist_order:
2331 *  0 = automatic detection of better ordering.
2332 *  1 = order by ([node] distance, -zonetype)
2333 *  2 = order by (-zonetype, [node] distance)
2334 *
2335 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2336 *  the same zonelist. So only NUMA can configure this param.
2337 */
2338#define ZONELIST_ORDER_DEFAULT  0
2339#define ZONELIST_ORDER_NODE     1
2340#define ZONELIST_ORDER_ZONE     2
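/*
 * Illustrative example (assuming a two-node machine where node 0 has
 * ZONE_DMA and ZONE_NORMAL and node 1 has only ZONE_NORMAL), node 0's
 * fallback zonelist becomes:
 *
 *        node order:  Normal(0), DMA(0), Normal(1)
 *        zone order:  Normal(0), Normal(1), DMA(0)
 *
 * i.e. node order maximises locality at the risk of exhausting ZONE_DMA,
 * while zone order preserves ZONE_DMA at the cost of going off-node sooner.
 */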
2341
2342/* zonelist order in the kernel.
2343 * set_zonelist_order() will set this to NODE or ZONE.
2344 */
2345static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2346static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2347
2348
2349#ifdef CONFIG_NUMA
2350/* The ordering the user specified via boot option or sysctl, if any */
2351static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2352/* string for sysctl */
2353#define NUMA_ZONELIST_ORDER_LEN 16
2354char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";
2355
2356/*
2357 * Interface to configure zonelist ordering.
2358 * Command line option "numa_zonelist_order":
2359 *      = "[dD]efault"  - default, automatic configuration
2360 *      = "[nN]ode"     - order by node locality, then by zone within node
2361 *      = "[zZ]one"     - order by zone, then by locality within zone
2362 */
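/*
 * Example (illustrative): booting with "numa_zonelist_order=zone", or writing
 * "zone" to /proc/sys/vm/numa_zonelist_order at runtime, forces zone order.
 */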
2363
2364static int __parse_numa_zonelist_order(char *s)
2365{
2366        if (*s == 'd' || *s == 'D') {
2367                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2368        } else if (*s == 'n' || *s == 'N') {
2369                user_zonelist_order = ZONELIST_ORDER_NODE;
2370        } else if (*s == 'z' || *s == 'Z') {
2371                user_zonelist_order = ZONELIST_ORDER_ZONE;
2372        } else {
2373                printk(KERN_WARNING
2374                        "Ignoring invalid numa_zonelist_order value:  "
2375                        "%s\n", s);
2376                return -EINVAL;
2377        }
2378        return 0;
2379}
2380
2381static __init int setup_numa_zonelist_order(char *s)
2382{
2383        if (s)
2384                return __parse_numa_zonelist_order(s);
2385        return 0;
2386}
2387early_param("numa_zonelist_order", setup_numa_zonelist_order);
2388
2389/*
2390 * sysctl handler for numa_zonelist_order
2391 */
2392int numa_zonelist_order_handler(ctl_table *table, int write,
2393                void __user *buffer, size_t *length,
2394                loff_t *ppos)
2395{
2396        char saved_string[NUMA_ZONELIST_ORDER_LEN];
2397        int ret;
2398
2399        if (write)
2400                strncpy(saved_string, (char*)table->data,
2401                        NUMA_ZONELIST_ORDER_LEN);
2402        ret = proc_dostring(table, write, buffer, length, ppos);
2403        if (ret)
2404                return ret;
2405        if (write) {
2406                int oldval = user_zonelist_order;
2407                if (__parse_numa_zonelist_order((char*)table->data)) {
2408                        /*
2409                         * bogus value.  restore saved string
2410                         */
2411                        strncpy((char*)table->data, saved_string,
2412                                NUMA_ZONELIST_ORDER_LEN);
2413                        user_zonelist_order = oldval;
2414                } else if (oldval != user_zonelist_order)
2415                        build_all_zonelists();
2416        }
2417        return 0;
2418}
2419
2420
2421#define MAX_NODE_LOAD (nr_online_nodes)
2422static int node_load[MAX_NUMNODES];
2423
2424/**
2425 * find_next_best_node - find the next node that should appear in a given node's fallback list
2426 * @node: node whose fallback list we're appending
2427 * @used_node_mask: nodemask_t of already used nodes
2428 *
2429 * We use a number of factors to determine which is the next node that should
2430 * appear on a given node's fallback list.  The node should not have appeared
2431 * already in @node's fallback list, and it should be the next closest node
2432 * according to the distance array (which contains arbitrary distance values
2433 * from each node to each node in the system); we also prefer nodes with no
2434 * CPUs, since presumably they'll have very little allocation pressure on
2435 * them otherwise.
2436 * It returns -1 if no node is found.
2437 */
2438static int find_next_best_node(int node, nodemask_t *used_node_mask)
2439{
2440        int n, val;
2441        int min_val = INT_MAX;
2442        int best_node = -1;
2443        const struct cpumask *tmp = cpumask_of_node(0);
2444
2445        /* Use the local node if we haven't already */
2446        if (!node_isset(node, *used_node_mask)) {
2447                node_set(node, *used_node_mask);
2448                return node;
2449        }
2450
2451        for_each_node_state(n, N_HIGH_MEMORY) {
2452
2453                /* Don't want a node to appear more than once */
2454                if (node_isset(n, *used_node_mask))
2455                        continue;
2456
2457                /* Use the distance array to find the distance */
2458                val = node_distance(node, n);
2459
2460                /* Penalize nodes under us ("prefer the next node") */
2461                val += (n < node);
2462
2463                /* Give preference to headless and unused nodes */
2464                tmp = cpumask_of_node(n);
2465                if (!cpumask_empty(tmp))
2466                        val += PENALTY_FOR_NODE_WITH_CPUS;
2467
2468                /* Slight preference for less loaded node */
2469                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2470                val += node_load[n];
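                /*
                 * Because val was just scaled by MAX_NODE_LOAD * MAX_NUMNODES,
                 * node_load[] effectively only breaks ties between nodes whose
                 * distance and CPU penalty are otherwise equal.
                 */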
2471
2472                if (val < min_val) {
2473                        min_val = val;
2474                        best_node = n;
2475                }
2476        }
2477
2478        if (best_node >= 0)
2479                node_set(best_node, *used_node_mask);
2480
2481        return best_node;
2482}
2483
2484
2485/*
2486 * Build zonelists ordered by node and zones within node.
2487 * This results in maximum locality--normal zone overflows into local
2488 * DMA zone, if any--but risks exhausting DMA zone.
2489 */
2490static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2491{
2492        int j;
2493        struct zonelist *zonelist;
2494
2495        zonelist = &pgdat->node_zonelists[0];
2496        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2497                ;
2498        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2499                                                        MAX_NR_ZONES - 1);
2500        zonelist->_zonerefs[j].zone = NULL;
2501        zonelist->_zonerefs[j].zone_idx = 0;
2502}
2503
2504/*
2505 * Build gfp_thisnode zonelists
2506 */
2507static void build_thisnode_zonelists(pg_data_t *pgdat)
2508{
2509        int j;
2510        struct zonelist *zonelist;
2511
2512        zonelist = &pgdat->node_zonelists[1];
2513        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2514        zonelist->_zonerefs[j].zone = NULL;
2515        zonelist->_zonerefs[j].zone_idx = 0;
2516}
2517
2518/*
2519 * Build zonelists ordered by zone and nodes within zones.
2520 * This results in conserving DMA zone[s] until all Normal memory is
2521 * exhausted, but results in overflowing to remote node while memory
2522 * may still exist in local DMA zone.
2523 */
2524static int node_order[MAX_NUMNODES];
2525
2526static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2527{
2528        int pos, j, node;
2529        int zone_type;          /* needs to be signed */
2530        struct zone *z;
2531        struct zonelist *zonelist;
2532
2533        zonelist = &pgdat->node_zonelists[0];
2534        pos = 0;
2535        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2536                for (j = 0; j < nr_nodes; j++) {
2537                        node = node_order[j];
2538                        z = &NODE_DATA(node)->node_zones[zone_type];
2539                        if (populated_zone(z)) {
2540                                zoneref_set_zone(z,
2541                                        &zonelist->_zonerefs[pos++]);
2542                                check_highest_zone(zone_type);
2543                        }
2544                }
2545        }
2546        zonelist->_zonerefs[pos].zone = NULL;
2547        zonelist->_zonerefs[pos].zone_idx = 0;
2548}
2549
2550static int default_zonelist_order(void)
2551{
2552        int nid, zone_type;
2553        unsigned long low_kmem_size, total_size;
2554        struct zone *z;
2555        int average_size;
2556        /*
2557         * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2558         * If they are really small and used heavily, the system can fall
2559         * into OOM very easily.
2560         * This function detects the ZONE_DMA/DMA32 size and configures zone order.
2561         */
2562        /* Is there ZONE_NORMAL? (e.g. ppc has only the DMA zone.) */
2563        low_kmem_size = 0;
2564        total_size = 0;
2565        for_each_online_node(nid) {
2566                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2567                        z = &NODE_DATA(nid)->node_zones[zone_type];
2568                        if (populated_zone(z)) {
2569                                if (zone_type < ZONE_NORMAL)
2570                                        low_kmem_size += z->present_pages;
2571                                total_size += z->present_pages;
2572                        }
2573                }
2574        }
2575        if (!low_kmem_size ||  /* there is no DMA area. */
2576            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2577                return ZONELIST_ORDER_NODE;
2578        /*
2579         * Look into each node's config.
2580         * If there is a node whose DMA/DMA32 memory covers a large share of
2581         * its local memory, NODE_ORDER may be suitable.
2582         */
2583        average_size = total_size /
2584                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2585        for_each_online_node(nid) {
2586                low_kmem_size = 0;
2587                total_size = 0;
2588                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2589                        z = &NODE_DATA(nid)->node_zones[zone_type];
2590                        if (populated_zone(z)) {
2591                                if (zone_type < ZONE_NORMAL)
2592                                        low_kmem_size += z->present_pages;
2593                                total_size += z->present_pages;
2594                        }
2595                }
2596                if (low_kmem_size &&
2597                    total_size > average_size && /* ignore small node */
2598                    low_kmem_size > total_size * 70/100)
2599                        return ZONELIST_ORDER_NODE;
2600        }
2601        return ZONELIST_ORDER_ZONE;
2602}
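/*
 * Illustrative outcomes: when DMA/DMA32 make up only a small fraction of
 * memory, both checks above fail and zone order is used; when a node's
 * memory is predominantly DMA/DMA32, node order is chosen so that locality
 * is preserved rather than spilling most allocations to remote nodes.
 */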
2603
2604static void set_zonelist_order(void)
2605{
2606        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2607                current_zonelist_order = default_zonelist_order();
2608        else
2609                current_zonelist_order = user_zonelist_order;
2610}
2611
2612static void build_zonelists(pg_data_t *pgdat)
2613{
2614        int j, node, load;
2615        enum zone_type i;
2616        nodemask_t used_mask;
2617        int local_node, prev_node;
2618        struct zonelist *zonelist;
2619        int order = current_zonelist_order;
2620
2621        /* initialize zonelists */
2622        for (i = 0; i < MAX_ZONELISTS; i++) {
2623                zonelist = pgdat->node_zonelists + i;
2624                zonelist->_zonerefs[0].zone = NULL;
2625                zonelist->_zonerefs[0].zone_idx = 0;
2626        }
2627
2628        /* NUMA-aware ordering of nodes */
2629        local_node = pgdat->node_id;
2630        load = nr_online_nodes;
2631        prev_node = local_node;
2632        nodes_clear(used_mask);
2633
2634        memset(node_order, 0, sizeof(node_order));
2635        j = 0;
2636
2637        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2638                int distance = node_distance(local_node, node);
2639
2640                /*
2641                 * If another node is sufficiently far away then it is better
2642                 * to reclaim pages in a zone before going off node.
2643                 */
2644                if (distance > RECLAIM_DISTANCE)
2645                        zone_reclaim_mode = 1;
2646
2647                /*
2648                 * We don't want to pressure a particular node.
2649                 * So we add a penalty to the first node in the same
2650                 * distance group to make it round-robin.
2651                 */
2652                if (distance != node_distance(local_node, prev_node))
2653                        node_load[node] = load;
2654
2655                prev_node = node;
2656                load--;
2657                if (order == ZONELIST_ORDER_NODE)
2658                        build_zonelists_in_node_order(pgdat, node);
2659                else
2660                        node_order[j++] = node; /* remember order */
2661        }
2662
2663        if (order == ZONELIST_ORDER_ZONE) {
2664                /* calculate node order -- i.e., DMA last! */
2665                build_zonelists_in_zone_order(pgdat, j);
2666        }
2667
2668        build_thisnode_zonelists(pgdat);
2669}
2670
2671/* Construct the zonelist performance cache - see further mmzone.h */
2672static void build_zonelist_cache(pg_data_t *pgdat)
2673{
2674        struct zonelist *zonelist;
2675        struct zonelist_cache *zlc;
2676        struct zoneref *z;
2677
2678        zonelist = &pgdat->node_zonelists[0];
2679        zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2680        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2681        for (z = zonelist->_zonerefs; z->zone; z++)
2682                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2683}
2684
2685
2686#else   /* CONFIG_NUMA */
2687
2688static void set_zonelist_order(void)
2689{
2690        current_zonelist_order = ZONELIST_ORDER_ZONE;
2691}
2692
2693static void build_zonelists(pg_data_t *pgdat)
2694{
2695        int node, local_node;
2696        enum zone_type j;
2697        struct zonelist *zonelist;
2698
2699        local_node = pgdat->node_id;
2700
2701        zonelist = &pgdat->node_zonelists[0];
2702        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2703
2704        /*
2705         * Now we build the zonelist so that it contains the zones
2706         * of all the other nodes.
2707         * We don't want to pressure a particular node, so when
2708         * building the zones for node N, we make sure that the
2709         * zones coming right after the local ones are those from
2710         * node N+1 (modulo N)
2711         * node N+1, wrapping around to node 0.
2712        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2713                if (!node_online(node))
2714                        continue;
2715                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2716                                                        MAX_NR_ZONES - 1);
2717        }
2718        for (node = 0; node < local_node; node++) {
2719                if (!node_online(node))
2720                        continue;
2721                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2722                                                        MAX_NR_ZONES - 1);
2723        }
2724
2725        zonelist->_zonerefs[j].zone = NULL;
2726        zonelist->_zonerefs[j].zone_idx = 0;
2727}
2728
2729/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2730static void build_zonelist_cache(pg_data_t *pgdat)
2731{
2732        pgdat->node_zonelists[0].zlcache_ptr = NULL;
2733}
2734
2735#endif  /* CONFIG_NUMA */
2736
2737/* The return type is int only because stop_machine() requires it */
2738static int __build_all_zonelists(void *dummy)
2739{
2740        int nid;
2741
2742#ifdef CONFIG_NUMA
2743        memset(node_load, 0, sizeof(node_load));
2744#endif
2745        for_each_online_node(nid) {
2746                pg_data_t *pgdat = NODE_DATA(nid);
2747
2748                build_zonelists(pgdat);
2749                build_zonelist_cache(pgdat);
2750        }
2751        return 0;
2752}
2753
2754void build_all_zonelists(void)
2755{
2756        set_zonelist_order();
2757
2758        if (system_state == SYSTEM_BOOTING) {
2759                __build_all_zonelists(NULL);
2760                mminit_verify_zonelist();
2761                cpuset_init_current_mems_allowed();
2762        } else {
2763                /* we have to stop all cpus to guarantee there is no user
2764                   of zonelist */
2765                stop_machine(__build_all_zonelists, NULL, NULL);
2766                /* cpuset refresh routine should be here */
2767        }
2768        vm_total_pages = nr_free_pagecache_pages();
2769        /*
2770         * Disable grouping by mobility if the number of pages in the
2771         * system is too low to allow the mechanism to work. It would be
2772         * more accurate, but expensive to check per-zone. This check is
2773         * made on memory-hotadd so a system can start with mobility
2774         * disabled and enable it later
2775         */
2776        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2777                page_group_by_mobility_disabled = 1;
2778        else
2779                page_group_by_mobility_disabled = 0;
2780
2781        printk("Built %i zonelists in %s order, mobility grouping %s.  "
2782                "Total pages: %ld\n",
2783                        nr_online_nodes,
2784                        zonelist_order_name[current_zonelist_order],
2785                        page_group_by_mobility_disabled ? "off" : "on",
2786                        vm_total_pages);
2787#ifdef CONFIG_NUMA
2788        printk("Policy zone: %s\n", zone_names[policy_zone]);
2789#endif
2790}
2791
2792/*
2793 * Helper functions to size the waitqueue hash table.
2794 * Essentially these want to choose hash table sizes sufficiently
2795 * large so that collisions trying to wait on pages are rare.
2796 * But in fact, the number of active page waitqueues on typical
2797 * systems is ridiculously low, less than 200. So this is even
2798 * conservative, even though it seems large.
2799 *
2800 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2801 * waitqueues, i.e. the size of the waitq table given the number of pages.
2802 */
2803#define PAGES_PER_WAITQUEUE     256
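/*
 * Worked example (illustrative): a 1GB zone holds 262144 4KB pages, giving
 * 262144 / 256 = 1024 hash entries once rounded up to a power of two, well
 * within the [4, 4096] clamp applied below for the !MEMORY_HOTPLUG case.
 */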
2804
2805#ifndef CONFIG_MEMORY_HOTPLUG
2806static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2807{
2808        unsigned long size = 1;
2809
2810        pages /= PAGES_PER_WAITQUEUE;
2811
2812        while (size < pages)
2813                size <<= 1;
2814
2815        /*
2816         * Once we have dozens or even hundreds of threads sleeping
2817         * on IO we've got bigger problems than wait queue collision.
2818         * Limit the size of the wait table to a reasonable size.
2819         */
2820        size = min(size, 4096UL);
2821
2822        return max(size, 4UL);
2823}
2824#else
2825/*
2826 * A zone's size might be changed by hot-add, so it is not possible to determine
2827 * a suitable size for its wait_table.  So we use the maximum size now.
2828 *
2829 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2830 *
2831 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2832 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2833 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2834 *
2835 * With the traditional sizing above, the maximum number of entries is reached
2836 * once a zone has (512K + 256) pages or more, which corresponds to:
2837 *
2838 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2839 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2840 *    powerpc (64K page size)             : =  (32G +16M)byte.
2841 */
2842static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2843{
2844        return 4096UL;
2845}
2846#endif
2847
2848/*
2849 * This is an integer logarithm so that shifts can be used later
2850 * to extract the more random high bits from the multiplicative
2851 * hash function before the remainder is taken.
2852 */
2853static inline unsigned long wait_table_bits(unsigned long size)
2854{
2855        return ffz(~size);
2856}
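/* For example, wait_table_bits(4096) is 12 and wait_table_bits(4) is 2. */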
2857
2858#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2859
2860/*
2861 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2862 * of blocks reserved is based on min_wmark_pages(zone). The memory within
2863 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
2864 * higher will lead to a bigger reserve which will get freed as contiguous
2865 * blocks as reclaim kicks in
2866 */
2867static void setup_zone_migrate_reserve(struct zone *zone)
2868{
2869        unsigned long start_pfn, pfn, end_pfn;
2870        struct page *page;
2871        unsigned long block_migratetype;
2872        int reserve;
2873
2874        /* Get the start pfn, end pfn and the number of blocks to reserve */
2875        start_pfn = zone->zone_start_pfn;
2876        end_pfn = start_pfn + zone->spanned_pages;
2877        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
2878                                                        pageblock_order;
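        /*
         * Worked example (illustrative): with 2MB pageblocks (512 pages of
         * 4KB) and a min watermark of 4096 pages (16MB), the roundup yields
         * 8 blocks, which the cap below reduces to 2.
         */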
2879
2880        /*
2881         * Reserve blocks are generally in place to help high-order atomic
2882         * allocations that are short-lived. A min_free_kbytes value that
2883         * would result in more than 2 reserve blocks for atomic allocations
2884         * is assumed to be in place to help anti-fragmentation for the
2885         * future allocation of hugepages at runtime.
2886         */
2887        reserve = min(2, reserve);
2888
2889        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2890                if (!pfn_valid(pfn))
2891                        continue;
2892                page = pfn_to_page(pfn);
2893
2894                /* Watch out for overlapping nodes */
2895                if (page_to_nid(page) != zone_to_nid(zone))
2896                        continue;
2897
2898                /* Blocks with reserved pages will never be freed, skip them. */
2899                if (PageReserved(page))
2900                        continue;
2901
2902                block_migratetype = get_pageblock_migratetype(page);
2903
2904                /* If this block is reserved, account for it */
2905                if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2906                        reserve--;
2907                        continue;
2908                }
2909
2910                /* Suitable for reserving if this block is movable */
2911                if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2912                        set_pageblock_migratetype(page, MIGRATE_RESERVE);
2913                        move_freepages_block(zone, page, MIGRATE_RESERVE);
2914                        reserve--;
2915                        continue;
2916                }
2917
2918                /*
2919                 * If the reserve is met and this is a previous reserved block,
2920                 * take it back
2921                 */
2922                if (block_migratetype == MIGRATE_RESERVE) {
2923                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2924                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
2925                }
2926        }
2927}
2928
2929/*
2930 * Initially all pages are reserved - free ones are freed
2931 * up by free_all_bootmem() once the early boot process is
2932 * done. Non-atomic initialization, single-pass.
2933 */
2934void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2935                unsigned long start_pfn, enum memmap_context context)
2936{
2937        struct page *page;
2938        unsigned long end_pfn = start_pfn + size;
2939        unsigned long pfn;
2940        struct zone *z;
2941
2942        if (highest_memmap_pfn < end_pfn - 1)
2943                highest_memmap_pfn = end_pfn - 1;
2944
2945        z = &NODE_DATA(nid)->node_zones[zone];
2946        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2947                /*
2948                 * There can be holes in boot-time mem_map[]s
2949                 * handed to this function.  They do not
2950                 * exist on hotplugged memory.
2951                 */
2952                if (context == MEMMAP_EARLY) {
2953                        if (!early_pfn_valid(pfn))
2954                                continue;
2955                        if (!early_pfn_in_nid(pfn, nid))
2956                                continue;
2957                }
2958                page = pfn_to_page(pfn);
2959                set_page_links(page, zone, nid, pfn);
2960                mminit_verify_page_links(page, zone, nid, pfn);
2961                init_page_count(page);
2962                reset_page_mapcount(page);
2963                SetPageReserved(page);
2964                /*
2965                 * Mark the block movable so that blocks are reserved for
2966                 * movable at startup. This will force kernel allocations
2967                 * to reserve their blocks rather than leaking throughout
2968                 * the address space during boot when many long-lived
2969                 * kernel allocations are made. Later some blocks near
2970                 * the start are marked MIGRATE_RESERVE by
2971                 * setup_zone_migrate_reserve()
2972                 *
2973                 * The pageblock bitmap is created for the zone's valid pfn
2974                 * range, but the memmap may also cover invalid pages (for
2975                 * alignment), so check here that set_pageblock_migratetype()
2976                 * is not called on a pfn outside the zone.
2977                 */
2978                if ((z->zone_start_pfn <= pfn)
2979                    && (pfn < z->zone_start_pfn + z->spanned_pages)
2980                    && !(pfn & (pageblock_nr_pages - 1)))
2981                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
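
                /*
                 * Editor's note (illustrative, not in the original source):
                 * "pfn & (pageblock_nr_pages - 1)" is a power-of-two
                 * alignment test.  With pageblock_order == 10 only pfns
                 * 0, 1024, 2048, ... satisfy it, so each pageblock's
                 * migratetype is written once, by its first page, provided
                 * that page is valid and inside the zone.
                 */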
2982
2983                INIT_LIST_HEAD(&page->lru);
2984#ifdef WANT_PAGE_VIRTUAL
2985                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2986                if (!is_highmem_idx(zone))
2987                        set_page_address(page, __va(pfn << PAGE_SHIFT));
2988#endif
2989        }
2990}
2991
2992static void __meminit zone_init_free_lists(struct zone *zone)
2993{
2994        int order, t;
2995        for_each_migratetype_order(order, t) {
2996                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2997                zone->free_area[order].nr_free = 0;
2998        }
2999}
3000
3001#ifndef __HAVE_ARCH_MEMMAP_INIT
3002#define memmap_init(size, nid, zone, start_pfn) \
3003        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3004#endif
3005
3006static int zone_batchsize(struct zone *zone)
3007{
3008#ifdef CONFIG_MMU
3009        int batch;
3010
3011        /*
3012         * The per-cpu-pages pools are set to around 1/1000th of the
3013         * size of the zone, but to no more than half a megabyte.
3014         *
3015         * OK, so we don't know how big the cache is.  So guess.
3016         */
3017        batch = zone->present_pages / 1024;
3018        if (batch * PAGE_SIZE > 512 * 1024)
3019                batch = (512 * 1024) / PAGE_SIZE;
3020        batch /= 4;             /* We effectively *= 4 below */
3021        if (batch < 1)
3022                batch = 1;
3023
3024        /*
3025         * Clamp the batch to a 2^n - 1 value. Having a power
3026         * of 2 value was found to be more likely to have
3027         * suboptimal cache aliasing properties in some cases.
3028         *
3029         * For example if 2 tasks are alternately allocating
3030         * batches of pages, one task can end up with a lot
3031         * of pages of one half of the possible page colors
3032         * and the other with pages of the other colors.
3033         */
3034        batch = rounddown_pow_of_two(batch + batch/2) - 1;
3035
3036        return batch;
3037
3038#else
3039        /* The deferral and batching of frees should be suppressed under NOMMU
3040         * conditions.
3041         *
3042         * The problem is that NOMMU needs to be able to allocate large chunks
3043         * of contiguous memory as there's no hardware page translation to
3044         * assemble apparent contiguous memory from discontiguous pages.
3045         *
3046         * Queueing large contiguous runs of pages for batching, however,
3047         * causes the pages to actually be freed in smaller chunks.  As there
3048         * can be a significant delay between the individual batches being
3049         * recycled, this leads to the once large chunks of space being
3050         * fragmented and becoming unavailable for high-order allocations.
3051         */
3052        return 0;
3053#endif
3054}
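
/*
 * Worked example for zone_batchsize() above (editor's illustration with
 * hypothetical values): a 1GB zone with 4KB pages has 262144 present pages.
 *
 *      262144 / 1024                        ==  256 pages (1MB) > 512KB
 *      so batch becomes (512 * 1024) / 4096 ==  128
 *      128 / 4                              ==   32
 *      rounddown_pow_of_two(32 + 16) - 1    ==   31
 *
 * giving a per-cpu batch of 31 pages and, via setup_pageset() below,
 * pcp->high == 6 * 31 == 186 pages.
 */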
3055
3056static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3057{
3058        struct per_cpu_pages *pcp;
3059        int migratetype;
3060
3061        memset(p, 0, sizeof(*p));
3062
3063        pcp = &p->pcp;
3064        pcp->count = 0;
3065        pcp->high = 6 * batch;
3066        pcp->batch = max(1UL, 1 * batch);
3067        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3068                INIT_LIST_HEAD(&pcp->lists[migratetype]);
3069}
3070
3071/*
3072 * setup_pagelist_highmark() sets the high watermark of the hot per-cpu page
3073 * list in pageset p to the value high.
3074 */
3075
3076static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3077                                unsigned long high)
3078{
3079        struct per_cpu_pages *pcp;
3080
3081        pcp = &p->pcp;
3082        pcp->high = high;
3083        pcp->batch = max(1UL, high/4);
3084        if ((high/4) > (PAGE_SHIFT * 8))
3085                pcp->batch = PAGE_SHIFT * 8;
3086}
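
/*
 * Editor's illustration for setup_pagelist_highmark() (hypothetical values):
 * with the percpu_pagelist_fraction sysctl set to 8, process_zones() passes
 * high == 262144 / 8 == 32768 for a 262144-page zone.  high / 4 == 8192
 * exceeds PAGE_SHIFT * 8 (96 with 4KB pages), so the batch is clamped to 96
 * pages per refill or drain.
 */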
3087
3088
3089#ifdef CONFIG_NUMA
3090/*
3091 * Boot pageset table. One per cpu which is going to be used for all
3092 * zones and all nodes. The parameters will be set in such a way
3093 * that an item put on a list will immediately be handed over to
3094 * the buddy list. This is safe since pageset manipulation is done
3095 * with interrupts disabled.
3096 *
3097 * Some NUMA counter updates may also be caught by the boot pagesets.
3098 *
3099 * The boot_pagesets must be kept even after bootup is complete for
3100 * unused processors and/or zones. They do play a role for bootstrapping
3101 * hotplugged processors.
3102 *
3103 * zoneinfo_show() and maybe other functions do
3104 * not check if the processor is online before following the pageset pointer.
3105 * Other parts of the kernel may not check if the zone is available.
3106 */
3107static struct per_cpu_pageset boot_pageset[NR_CPUS];
3108
3109/*
3110 * Dynamically allocate memory for the
3111 * per cpu pageset array in struct zone.
3112 */
3113static int __cpuinit process_zones(int cpu)
3114{
3115        struct zone *zone, *dzone;
3116        int node = cpu_to_node(cpu);
3117
3118        node_set_state(node, N_CPU);    /* this node has a cpu */
3119
3120        for_each_populated_zone(zone) {
3121                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
3122                                         GFP_KERNEL, node);
3123                if (!zone_pcp(zone, cpu))
3124                        goto bad;
3125
3126                setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
3127
3128                if (percpu_pagelist_fraction)
3129                        setup_pagelist_highmark(zone_pcp(zone, cpu),
3130                                (zone->present_pages / percpu_pagelist_fraction));
3131        }
3132
3133        return 0;
3134bad:
3135        for_each_zone(dzone) {
3136                if (!populated_zone(dzone))
3137                        continue;
3138                if (dzone == zone)
3139                        break;
3140                kfree(zone_pcp(dzone, cpu));
3141                zone_pcp(dzone, cpu) = &boot_pageset[cpu];
3142        }
3143        return -ENOMEM;
3144}
3145
3146static inline void free_zone_pagesets(int cpu)
3147{
3148        struct zone *zone;
3149
3150        for_each_zone(zone) {
3151                struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
3152
3153                /* Free per_cpu_pageset if it is slab allocated */
3154                if (pset != &boot_pageset[cpu])
3155                        kfree(pset);
3156                zone_pcp(zone, cpu) = &boot_pageset[cpu];
3157        }
3158}
3159
3160static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
3161                unsigned long action,
3162                void *hcpu)
3163{
3164        int cpu = (long)hcpu;
3165        int ret = NOTIFY_OK;
3166
3167        switch (action) {
3168        case CPU_UP_PREPARE:
3169        case CPU_UP_PREPARE_FROZEN:
3170                if (process_zones(cpu))
3171                        ret = NOTIFY_BAD;
3172                break;
3173        case CPU_UP_CANCELED:
3174        case CPU_UP_CANCELED_FROZEN:
3175        case CPU_DEAD:
3176        case CPU_DEAD_FROZEN:
3177                free_zone_pagesets(cpu);
3178                break;
3179        default:
3180                break;
3181        }
3182        return ret;
3183}
3184
3185static struct notifier_block __cpuinitdata pageset_notifier =
3186        { &pageset_cpuup_callback, NULL, 0 };
3187
3188void __init setup_per_cpu_pageset(void)
3189{
3190        int err;
3191
3192        /* Initialize per_cpu_pageset for cpu 0.
3193         * A cpuup callback will do this for every cpu
3194         * as it comes online
3195         */
3196        err = process_zones(smp_processor_id());
3197        BUG_ON(err);
3198        register_cpu_notifier(&pageset_notifier);
3199}
3200
3201#endif
3202
3203static noinline __init_refok
3204int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3205{
3206        int i;
3207        struct pglist_data *pgdat = zone->zone_pgdat;
3208        size_t alloc_size;
3209
3210        /*
3211         * The per-page waitqueue mechanism uses hashed waitqueues
3212         * per zone.
3213         */
3214        zone->wait_table_hash_nr_entries =
3215                 wait_table_hash_nr_entries(zone_size_pages);
3216        zone->wait_table_bits =
3217                wait_table_bits(zone->wait_table_hash_nr_entries);
3218        alloc_size = zone->wait_table_hash_nr_entries
3219                                        * sizeof(wait_queue_head_t);
3220
3221        if (!slab_is_available()) {
3222                zone->wait_table = (wait_queue_head_t *)
3223                        alloc_bootmem_node(pgdat, alloc_size);
3224        } else {
3225                /*
3226                 * This case means that a zone whose size was 0 is getting
3227                 * new memory via memory hot-add.
3228                 * It may also be that an entire new node was hot-added.  In
3229                 * that case vmalloc() cannot yet allocate from the new
3230                 * node's memory, even though the wait_table should ideally
3231                 * live on that node itself.
3232                 * Using the new node's own memory here would require
3233                 * further work.
3234                 */
3235                zone->wait_table = vmalloc(alloc_size);
3236        }
3237        if (!zone->wait_table)
3238                return -ENOMEM;
3239
3240        for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3241                init_waitqueue_head(zone->wait_table + i);
3242
3243        return 0;
3244}
3245
3246static int __zone_pcp_update(void *data)
3247{
3248        struct zone *zone = data;
3249        int cpu;
3250        unsigned long batch = zone_batchsize(zone), flags;
3251
3252        for (cpu = 0; cpu < NR_CPUS; cpu++) {
3253                struct per_cpu_pageset *pset;
3254                struct per_cpu_pages *pcp;
3255
3256                pset = zone_pcp(zone, cpu);
3257                pcp = &pset->pcp;
3258
3259                local_irq_save(flags);
3260                free_pcppages_bulk(zone, pcp->count, pcp);
3261                setup_pageset(pset, batch);
3262                local_irq_restore(flags);
3263        }
3264        return 0;
3265}
3266
3267void zone_pcp_update(struct zone *zone)
3268{
3269        stop_machine(__zone_pcp_update, zone, NULL);
3270}
3271
3272static __meminit void zone_pcp_init(struct zone *zone)
3273{
3274        int cpu;
3275        unsigned long batch = zone_batchsize(zone);
3276
3277        for (cpu = 0; cpu < NR_CPUS; cpu++) {
3278#ifdef CONFIG_NUMA
3279                /* Early boot. Slab allocator not functional yet */
3280                zone_pcp(zone, cpu) = &boot_pageset[cpu];
3281                setup_pageset(&boot_pageset[cpu], 0);
3282#else
3283                setup_pageset(zone_pcp(zone, cpu), batch);
3284#endif
3285        }
3286        if (zone->present_pages)
3287                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
3288                        zone->name, zone->present_pages, batch);
3289}
3290
3291__meminit int init_currently_empty_zone(struct zone *zone,
3292                                        unsigned long zone_start_pfn,
3293                                        unsigned long size,
3294                                        enum memmap_context context)
3295{
3296        struct pglist_data *pgdat = zone->zone_pgdat;
3297        int ret;
3298        ret = zone_wait_table_init(zone, size);
3299        if (ret)
3300                return ret;
3301        pgdat->nr_zones = zone_idx(zone) + 1;
3302
3303        zone->zone_start_pfn = zone_start_pfn;
3304
3305        mminit_dprintk(MMINIT_TRACE, "memmap_init",
3306                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3307                        pgdat->node_id,
3308                        (unsigned long)zone_idx(zone),
3309                        zone_start_pfn, (zone_start_pfn + size));
3310
3311        zone_init_free_lists(zone);
3312
3313        return 0;
3314}
3315
3316#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3317/*
3318 * Basic iterator support. Return the first range of PFNs for a node
3319 * Note: nid == MAX_NUMNODES returns first region regardless of node
3320 */
3321static int __meminit first_active_region_index_in_nid(int nid)
3322{
3323        int i;
3324
3325        for (i = 0; i < nr_nodemap_entries; i++)
3326                if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3327                        return i;
3328
3329        return -1;
3330}
3331
3332/*
3333 * Basic iterator support. Return the next active range of PFNs for a node
3334 * Note: nid == MAX_NUMNODES returns next region regardless of node
3335 */
3336static int __meminit next_active_region_index_in_nid(int index, int nid)
3337{
3338        for (index = index + 1; index < nr_nodemap_entries; index++)
3339                if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3340                        return index;
3341
3342        return -1;
3343}
3344
3345#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3346/*
3347 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3348 * Architectures may implement their own version but if add_active_range()
3349 * was used and there are no special requirements, this is a convenient
3350 * alternative
3351 */
3352int __meminit __early_pfn_to_nid(unsigned long pfn)
3353{
3354        int i;
3355
3356        for (i = 0; i < nr_nodemap_entries; i++) {
3357                unsigned long start_pfn = early_node_map[i].start_pfn;
3358                unsigned long end_pfn = early_node_map[i].end_pfn;
3359
3360                if (start_pfn <= pfn && pfn < end_pfn)
3361                        return early_node_map[i].nid;
3362        }
3363        /* This is a memory hole */
3364        return -1;
3365}
3366#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3367
3368int __meminit early_pfn_to_nid(unsigned long pfn)
3369{
3370        int nid;
3371
3372        nid = __early_pfn_to_nid(pfn);
3373        if (nid >= 0)
3374                return nid;
3375        /* pfn lies in a memory hole: just fall back to node 0 */
3376        return 0;
3377}
3378
3379#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3380bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3381{
3382        int nid;
3383
3384        nid = __early_pfn_to_nid(pfn);
3385        if (nid >= 0 && nid != node)
3386                return false;
3387        return true;
3388}
3389#endif
3390
3391/* Basic iterator support to walk early_node_map[] */
3392#define for_each_active_range_index_in_nid(i, nid) \
3393        for (i = first_active_region_index_in_nid(nid); i != -1; \
3394                                i = next_active_region_index_in_nid(i, nid))
3395
3396/**
3397 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3398 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3399 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3400 *
3401 * If an architecture guarantees that all ranges registered with
3402 * add_active_ranges() contain no holes and may be freed, this
3403 * function may be used instead of calling free_bootmem() manually.
3404 */
3405void __init free_bootmem_with_active_regions(int nid,
3406                                                unsigned long max_low_pfn)
3407{
3408        int i;
3409
3410        for_each_active_range_index_in_nid(i, nid) {
3411                unsigned long size_pages = 0;
3412                unsigned long end_pfn = early_node_map[i].end_pfn;
3413
3414                if (early_node_map[i].start_pfn >= max_low_pfn)
3415                        continue;
3416
3417                if (end_pfn > max_low_pfn)
3418                        end_pfn = max_low_pfn;
3419
3420                size_pages = end_pfn - early_node_map[i].start_pfn;
3421                free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3422                                PFN_PHYS(early_node_map[i].start_pfn),
3423                                size_pages << PAGE_SHIFT);
3424        }
3425}
3426
3427void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3428{
3429        int i;
3430        int ret;
3431
3432        for_each_active_range_index_in_nid(i, nid) {
3433                ret = work_fn(early_node_map[i].start_pfn,
3434                              early_node_map[i].end_pfn, data);
3435                if (ret)
3436                        break;
3437        }
3438}
3439/**
3440 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3441 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3442 *
3443 * If an architecture guarantees that all ranges registered with
3444 * add_active_ranges() contain no holes and may be freed, this
3445 * function may be used instead of calling memory_present() manually.
3446 */
3447void __init sparse_memory_present_with_active_regions(int nid)
3448{
3449        int i;
3450
3451        for_each_active_range_index_in_nid(i, nid)
3452                memory_present(early_node_map[i].nid,
3453                                early_node_map[i].start_pfn,
3454                                early_node_map[i].end_pfn);
3455}
3456
3457/**
3458 * get_pfn_range_for_nid - Return the start and end page frames for a node
3459 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3460 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3461 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3462 *
3463 * It returns the start and end page frame of a node based on information
3464 * provided by an arch calling add_active_range(). If called for a node
3465 * with no available memory, a warning is printed and the start and end
3466 * PFNs will be 0.
3467 */
3468void __meminit get_pfn_range_for_nid(unsigned int nid,
3469                        unsigned long *start_pfn, unsigned long *end_pfn)
3470{
3471        int i;
3472        *start_pfn = -1UL;
3473        *end_pfn = 0;
3474
3475        for_each_active_range_index_in_nid(i, nid) {
3476                *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3477                *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3478        }
3479
3480        if (*start_pfn == -1UL)
3481                *start_pfn = 0;
3482}
3483
3484/*
3485 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3486 * assumption is made that zones within a node are ordered by monotonically
3487 * increasing memory addresses, so that the "highest" populated zone is used.
3488 */
3489static void __init find_usable_zone_for_movable(void)
3490{
3491        int zone_index;
3492        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3493                if (zone_index == ZONE_MOVABLE)
3494                        continue;
3495
3496                if (arch_zone_highest_possible_pfn[zone_index] >
3497                                arch_zone_lowest_possible_pfn[zone_index])
3498                        break;
3499        }
3500
3501        VM_BUG_ON(zone_index == -1);
3502        movable_zone = zone_index;
3503}
3504
3505/*
3506 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3507 * because it is sized independently of architecture. Unlike the other zones,
3508 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3509 * in each node depending on the size of each node and how evenly kernelcore
3510 * is distributed. This helper function adjusts the zone ranges
3511 * provided by the architecture for a given node by using the end of the
3512 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3513 * zones within a node are ordered by monotonically increasing memory addresses.
3514 */
3515static void __meminit adjust_zone_range_for_zone_movable(int nid,
3516                                        unsigned long zone_type,
3517                                        unsigned long node_start_pfn,
3518                                        unsigned long node_end_pfn,
3519                                        unsigned long *zone_start_pfn,
3520                                        unsigned long *zone_end_pfn)
3521{
3522        /* Only adjust if ZONE_MOVABLE is on this node */
3523        if (zone_movable_pfn[nid]) {
3524                /* Size ZONE_MOVABLE */
3525                if (zone_type == ZONE_MOVABLE) {
3526                        *zone_start_pfn = zone_movable_pfn[nid];
3527                        *zone_end_pfn = min(node_end_pfn,
3528                                arch_zone_highest_possible_pfn[movable_zone]);
3529
3530                /* Adjust for ZONE_MOVABLE starting within this range */
3531                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3532                                *zone_end_pfn > zone_movable_pfn[nid]) {
3533                        *zone_end_pfn = zone_movable_pfn[nid];
3534
3535                /* Check if this whole range is within ZONE_MOVABLE */
3536                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3537                        *zone_start_pfn = *zone_end_pfn;
3538        }
3539}
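
/*
 * Editor's example for adjust_zone_range_for_zone_movable() (hypothetical
 * PFNs): suppose a node spans pfns 0-262144 and zone_movable_pfn[nid] is
 * 196608.  ZONE_MOVABLE itself is sized 196608-262144 by the first branch
 * (assuming the highest usable zone reaches the end of the node), a
 * ZONE_NORMAL range of 4096-262144 is clipped to 4096-196608 by the second
 * branch, and a zone lying entirely above 196608 collapses to zero pages
 * via the last branch.
 */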
3540
3541/*
3542 * Return the number of pages a zone spans in a node, including holes
3543 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3544 */
3545static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3546                                        unsigned long zone_type,
3547                                        unsigned long *ignored)
3548{
3549        unsigned long node_start_pfn, node_end_pfn;
3550        unsigned long zone_start_pfn, zone_end_pfn;
3551
3552        /* Get the start and end of the node and zone */
3553        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3554        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3555        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3556        adjust_zone_range_for_zone_movable(nid, zone_type,
3557                                node_start_pfn, node_end_pfn,
3558                                &zone_start_pfn, &zone_end_pfn);
3559
3560        /* Check that this node has pages within the zone's required range */
3561        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3562                return 0;
3563
3564        /* Move the zone boundaries inside the node if necessary */
3565        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3566        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3567
3568        /* Return the spanned pages */
3569        return zone_end_pfn - zone_start_pfn;
3570}
3571
3572/*
3573 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3574 * then all holes in the requested range will be accounted for.
3575 */
3576static unsigned long __meminit __absent_pages_in_range(int nid,
3577                                unsigned long range_start_pfn,
3578                                unsigned long range_end_pfn)
3579{
3580        int i = 0;
3581        unsigned long prev_end_pfn = 0, hole_pages = 0;
3582        unsigned long start_pfn;
3583
3584        /* Find the end_pfn of the first active range of pfns in the node */
3585        i = first_active_region_index_in_nid(nid);
3586        if (i == -1)
3587                return 0;
3588
3589        prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3590
3591        /* Account for ranges before physical memory on this node */
3592        if (early_node_map[i].start_pfn > range_start_pfn)
3593                hole_pages = prev_end_pfn - range_start_pfn;
3594
3595        /* Find all holes for the zone within the node */
3596        for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3597
3598                /* No need to continue if prev_end_pfn is outside the zone */
3599                if (prev_end_pfn >= range_end_pfn)
3600                        break;
3601
3602                /* Make sure the end of the zone is not within the hole */
3603                start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3604                prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3605
3606                /* Update the hole size count and move on */
3607                if (start_pfn > range_start_pfn) {
3608                        BUG_ON(prev_end_pfn > start_pfn);
3609                        hole_pages += start_pfn - prev_end_pfn;
3610                }
3611                prev_end_pfn = early_node_map[i].end_pfn;
3612        }
3613
3614        /* Account for ranges past physical memory on this node */
3615        if (range_end_pfn > prev_end_pfn)
3616                hole_pages += range_end_pfn -
3617                                max(range_start_pfn, prev_end_pfn);
3618
3619        return hole_pages;
3620}
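
/*
 * Editor's example for __absent_pages_in_range() (hypothetical ranges): for
 * a request covering pfns 0-1000 on a node whose active ranges are
 * [100, 400) and [600, 900), the function accounts 100 pages before the
 * first range, 200 pages between the two ranges and 100 pages after the
 * last one, returning 400 absent pages.
 */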
3621
3622/**
3623 * absent_pages_in_range - Return number of page frames in holes within a range
3624 * @start_pfn: The start PFN to start searching for holes
3625 * @end_pfn: The end PFN to stop searching for holes
3626 *
3627 * It returns the number of page frames in memory holes within a range.
3628 */
3629unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3630                                                        unsigned long end_pfn)
3631{
3632        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3633}
3634
3635/* Return the number of page frames in holes in a zone on a node */
3636static unsigned long __meminit zone_absent_pages_in_node(int nid,
3637                                        unsigned long zone_type,
3638                                        unsigned long *ignored)
3639{
3640        unsigned long node_start_pfn, node_end_pfn;
3641        unsigned long zone_start_pfn, zone_end_pfn;
3642
3643        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3644        zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3645                                                        node_start_pfn);
3646        zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3647                                                        node_end_pfn);
3648
3649        adjust_zone_range_for_zone_movable(nid, zone_type,
3650                        node_start_pfn, node_end_pfn,
3651                        &zone_start_pfn, &zone_end_pfn);
3652        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3653}
3654
3655#else
3656static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3657                                        unsigned long zone_type,
3658                                        unsigned long *zones_size)
3659{
3660        return zones_size[zone_type];
3661}
3662
3663static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3664                                                unsigned long zone_type,
3665                                                unsigned long *zholes_size)
3666{
3667        if (!zholes_size)
3668                return 0;
3669
3670        return zholes_size[zone_type];
3671}
3672
3673#endif
3674
3675static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3676                unsigned long *zones_size, unsigned long *zholes_size)
3677{
3678        unsigned long realtotalpages, totalpages = 0;
3679        enum zone_type i;
3680
3681        for (i = 0; i < MAX_NR_ZONES; i++)
3682                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3683                                                                zones_size);
3684        pgdat->node_spanned_pages = totalpages;
3685
3686        realtotalpages = totalpages;
3687        for (i = 0; i < MAX_NR_ZONES; i++)
3688                realtotalpages -=
3689                        zone_absent_pages_in_node(pgdat->node_id, i,
3690                                                                zholes_size);
3691        pgdat->node_present_pages = realtotalpages;
3692        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3693                                                        realtotalpages);
3694}
3695
3696#ifndef CONFIG_SPARSEMEM
3697/*
3698 * Calculate the size of the zone->blockflags rounded to an unsigned long
3699 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3700 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3701 * round what is now in bits to nearest long in bits, then return it in
3702 * bytes.
3703 */
3704static unsigned long __init usemap_size(unsigned long zonesize)
3705{
3706        unsigned long usemapsize;
3707
3708        usemapsize = roundup(zonesize, pageblock_nr_pages);
3709        usemapsize = usemapsize >> pageblock_order;
3710        usemapsize *= NR_PAGEBLOCK_BITS;
3711        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3712
3713        return usemapsize / 8;
3714}
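
/*
 * Editor's illustration for usemap_size() (assumed values): with 4KB pages
 * and pageblock_order == 10, a 4GB zone has 1048576 pages == 1024
 * pageblocks.  Assuming NR_PAGEBLOCK_BITS is 4 bits per pageblock, that is
 * 4096 bits, already a multiple of an unsigned long, so the usemap costs
 * only 512 bytes of bootmem for the whole zone.
 */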
3715
3716static void __init setup_usemap(struct pglist_data *pgdat,
3717                                struct zone *zone, unsigned long zonesize)
3718{
3719        unsigned long usemapsize = usemap_size(zonesize);
3720        zone->pageblock_flags = NULL;
3721        if (usemapsize)
3722                zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3723}
3724#else
3725static inline void setup_usemap(struct pglist_data *pgdat,
3726                                struct zone *zone, unsigned long zonesize) {}
3727#endif /* CONFIG_SPARSEMEM */
3728
3729#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3730
3731/* Return a sensible default order for the pageblock size. */
3732static inline int pageblock_default_order(void)
3733{
3734        if (HPAGE_SHIFT > PAGE_SHIFT)
3735                return HUGETLB_PAGE_ORDER;
3736
3737        return MAX_ORDER-1;
3738}
3739
3740/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3741static inline void __init set_pageblock_order(unsigned int order)
3742{
3743        /* Check that pageblock_nr_pages has not already been setup */
3744        if (pageblock_order)
3745                return;
3746
3747        /*
3748         * Assume the largest contiguous order of interest is a huge page.
3749         * This value may be variable depending on boot parameters on IA64
3750         */
3751        pageblock_order = order;
3752}
3753#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3754
3755/*
3756 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3757 * and pageblock_default_order() are unused as pageblock_order is set
3758 * at compile-time. See include/linux/pageblock-flags.h for the values of
3759 * pageblock_order based on the kernel config
3760 */
3761static inline int pageblock_default_order(unsigned int order)
3762{
3763        return MAX_ORDER-1;
3764}
3765#define set_pageblock_order(x)  do {} while (0)
3766
3767#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3768
3769/*
3770 * Set up the zone data structures:
3771 *   - mark all pages reserved
3772 *   - mark all memory queues empty
3773 *   - clear the memory bitmaps
3774 */
3775static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3776                unsigned long *zones_size, unsigned long *zholes_size)
3777{
3778        enum zone_type j;
3779        int nid = pgdat->node_id;
3780        unsigned long zone_start_pfn = pgdat->node_start_pfn;
3781        int ret;
3782
3783        pgdat_resize_init(pgdat);
3784        pgdat->nr_zones = 0;
3785        init_waitqueue_head(&pgdat->kswapd_wait);
3786        pgdat->kswapd_max_order = 0;
3787        pgdat_page_cgroup_init(pgdat);
3788
3789        for (j = 0; j < MAX_NR_ZONES; j++) {
3790                struct zone *zone = pgdat->node_zones + j;
3791                unsigned long size, realsize, memmap_pages;
3792                enum lru_list l;
3793
3794                size = zone_spanned_pages_in_node(nid, j, zones_size);
3795                realsize = size - zone_absent_pages_in_node(nid, j,
3796                                                                zholes_size);
3797
3798                /*
3799                 * Adjust realsize so that it accounts for how much memory
3800                 * is used by this zone for memmap. This affects the watermark
3801                 * and per-cpu initialisations
3802                 */
3803                memmap_pages =
3804                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3805                if (realsize >= memmap_pages) {
3806                        realsize -= memmap_pages;
3807                        if (memmap_pages)
3808                                printk(KERN_DEBUG
3809                                       "  %s zone: %lu pages used for memmap\n",
3810                                       zone_names[j], memmap_pages);
3811                } else
3812                        printk(KERN_WARNING
3813                                "  %s zone: %lu pages exceeds realsize %lu\n",
3814                                zone_names[j], memmap_pages, realsize);
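
                /*
                 * Editor's example (sizes are assumptions): with a 56-byte
                 * struct page and 4KB pages, a 262144-page zone needs
                 * PAGE_ALIGN(262144 * 56) >> PAGE_SHIFT == 3584 pages of
                 * memmap, so realsize drops from 262144 to 258560 before
                 * the watermarks and per-cpu batches are derived from it.
                 */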
3815
3816                /* Account for reserved pages */
3817                if (j == 0 && realsize > dma_reserve) {
3818                        realsize -= dma_reserve;
3819                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3820                                        zone_names[0], dma_reserve);
3821                }
3822
3823                if (!is_highmem_idx(j))
3824                        nr_kernel_pages += realsize;
3825                nr_all_pages += realsize;
3826
3827                zone->spanned_pages = size;
3828                zone->present_pages = realsize;
3829#ifdef CONFIG_NUMA
3830                zone->node = nid;
3831                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3832                                                / 100;
3833                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3834#endif
3835                zone->name = zone_names[j];
3836                spin_lock_init(&zone->lock);
3837                spin_lock_init(&zone->lru_lock);
3838                zone_seqlock_init(zone);
3839                zone->zone_pgdat = pgdat;
3840
3841                zone->prev_priority = DEF_PRIORITY;
3842
3843                zone_pcp_init(zone);
3844                for_each_lru(l) {
3845                        INIT_LIST_HEAD(&zone->lru[l].list);
3846                        zone->reclaim_stat.nr_saved_scan[l] = 0;
3847                }
3848                zone->reclaim_stat.recent_rotated[0] = 0;
3849                zone->reclaim_stat.recent_rotated[1] = 0;
3850                zone->reclaim_stat.recent_scanned[0] = 0;
3851                zone->reclaim_stat.recent_scanned[1] = 0;
3852                zap_zone_vm_stats(zone);
3853                zone->flags = 0;
3854                if (!size)
3855                        continue;
3856
3857                set_pageblock_order(pageblock_default_order());
3858                setup_usemap(pgdat, zone, size);
3859                ret = init_currently_empty_zone(zone, zone_start_pfn,
3860                                                size, MEMMAP_EARLY);
3861                BUG_ON(ret);
3862                memmap_init(size, nid, j, zone_start_pfn);
3863                zone_start_pfn += size;
3864        }
3865}
3866
3867static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3868{
3869        /* Skip empty nodes */
3870        if (!pgdat->node_spanned_pages)
3871                return;
3872
3873#ifdef CONFIG_FLAT_NODE_MEM_MAP
3874        /* ia64 gets its own node_mem_map, before this, without bootmem */
3875        if (!pgdat->node_mem_map) {
3876                unsigned long size, start, end;
3877                struct page *map;
3878
3879                /*
3880                 * The zone's endpoints aren't required to be MAX_ORDER
3881                 * aligned, but the node_mem_map endpoints must be, in order
3882                 * for the buddy allocator to function correctly.
3883                 */
3884                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3885                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3886                end = ALIGN(end, MAX_ORDER_NR_PAGES);
3887                size =  (end - start) * sizeof(struct page);
3888                map = alloc_remap(pgdat->node_id, size);
3889                if (!map)
3890                        map = alloc_bootmem_node(pgdat, size);
3891                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3892        }
3893#ifndef CONFIG_NEED_MULTIPLE_NODES
3894        /*
3895         * With no DISCONTIG, the global mem_map is just set as node 0's
3896         */
3897        if (pgdat == NODE_DATA(0)) {
3898                mem_map = NODE_DATA(0)->node_mem_map;
3899#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3900                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3901                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3902#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3903        }
3904#endif
3905#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3906}
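
/*
 * Editor's example for the MAX_ORDER alignment in alloc_node_mem_map()
 * above (hypothetical numbers): with MAX_ORDER_NR_PAGES == 1024, a node
 * starting at pfn 4660 and spanning 10000 pages gets a mem_map covering
 * pfns 4096-15360, and node_mem_map is then offset by (4660 - 4096)
 * entries so that pfn-to-page conversion stays a simple subtraction.
 */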
3907
3908void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3909                unsigned long node_start_pfn, unsigned long *zholes_size)
3910{
3911        pg_data_t *pgdat = NODE_DATA(nid);
3912
3913        pgdat->node_id = nid;
3914        pgdat->node_start_pfn = node_start_pfn;
3915        calculate_node_totalpages(pgdat, zones_size, zholes_size);
3916
3917        alloc_node_mem_map(pgdat);
3918#ifdef CONFIG_FLAT_NODE_MEM_MAP
3919        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3920                nid, (unsigned long)pgdat,
3921                (unsigned long)pgdat->node_mem_map);
3922#endif
3923
3924        free_area_init_core(pgdat, zones_size, zholes_size);
3925}
3926
3927#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3928
3929#if MAX_NUMNODES > 1
3930/*
3931 * Figure out the number of possible node ids.
3932 */
3933static void __init setup_nr_node_ids(void)
3934{
3935        unsigned int node;
3936        unsigned int highest = 0;
3937
3938        for_each_node_mask(node, node_possible_map)
3939                highest = node;
3940        nr_node_ids = highest + 1;
3941}
3942#else
3943static inline void setup_nr_node_ids(void)
3944{
3945}
3946#endif
3947
3948/**
3949 * add_active_range - Register a range of PFNs backed by physical memory
3950 * @nid: The node ID the range resides on
3951 * @start_pfn: The start PFN of the available physical memory
3952 * @end_pfn: The end PFN of the available physical memory
3953 *
3954 * These ranges are stored in an early_node_map[] and later used by
3955 * free_area_init_nodes() to calculate zone sizes and holes. If the
3956 * range spans a memory hole, it is up to the architecture to ensure
3957 * the memory is not freed by the bootmem allocator. If possible
3958 * the range being registered will be merged with existing ranges.
3959 */
3960void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3961                                                unsigned long end_pfn)
3962{
3963        int i;
3964
3965        mminit_dprintk(MMINIT_TRACE, "memory_register",
3966                        "Entering add_active_range(%d, %#lx, %#lx) "
3967                        "%d entries of %d used\n",
3968                        nid, start_pfn, end_pfn,
3969                        nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3970
3971        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3972
3973        /* Merge with existing active regions if possible */
3974        for (i = 0; i < nr_nodemap_entries; i++) {
3975                if (early_node_map[i].nid != nid)
3976                        continue;
3977
3978                /* Skip if an existing region covers this new one */
3979                if (start_pfn >= early_node_map[i].start_pfn &&
3980                                end_pfn <= early_node_map[i].end_pfn)
3981                        return;
3982
3983                /* Merge forward if suitable */
3984                if (start_pfn <= early_node_map[i].end_pfn &&
3985                                end_pfn > early_node_map[i].end_pfn) {
3986                        early_node_map[i].end_pfn = end_pfn;
3987                        return;
3988                }
3989
3990                /* Merge backward if suitable */
3991                if (start_pfn < early_node_map[i].end_pfn &&
3992                                end_pfn >= early_node_map[i].start_pfn) {
3993                        early_node_map[i].start_pfn = start_pfn;
3994                        return;
3995                }
3996        }
3997
3998        /* Check that early_node_map is large enough */
3999        if (i >= MAX_ACTIVE_REGIONS) {
4000                printk(KERN_CRIT "More than %d memory regions, truncating\n",
4001                                                        MAX_ACTIVE_REGIONS);
4002                return;
4003        }
4004
4005        early_node_map[i].nid = nid;
4006        early_node_map[i].start_pfn = start_pfn;
4007        early_node_map[i].end_pfn = end_pfn;
4008        nr_nodemap_entries = i + 1;
4009}
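
/*
 * Editor's example for add_active_range() (hypothetical PFNs): if
 * early_node_map[] already holds [0x100, 0x200) for a node, a call with
 * [0x180, 0x300) takes the "merge forward" branch and the entry becomes
 * [0x100, 0x300), while a call with [0x120, 0x1c0) is swallowed by the
 * "existing region covers this new one" check and the map is unchanged.
 */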
4010
4011/**
4012 * remove_active_range - Shrink an existing registered range of PFNs
4013 * @nid: The node id the range is on that should be shrunk
4014 * @start_pfn: The start PFN of the range to remove
4015 * @end_pfn: The end PFN of the range to remove
4016 *
4017 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4018 * The map is kept near the end of the physical page range that has already
4019 * been registered. This function allows an arch to shrink an existing
4020 * registered range.
4021 */
4022void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4023                                unsigned long end_pfn)
4024{
4025        int i, j;
4026        int removed = 0;
4027
4028        printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4029                          nid, start_pfn, end_pfn);
4030
4031        /* Find the old active region end and shrink */
4032        for_each_active_range_index_in_nid(i, nid) {
4033                if (early_node_map[i].start_pfn >= start_pfn &&
4034                    early_node_map[i].end_pfn <= end_pfn) {
4035                        /* clear it */
4036                        early_node_map[i].start_pfn = 0;
4037                        early_node_map[i].end_pfn = 0;
4038                        removed = 1;
4039                        continue;
4040                }
4041                if (early_node_map[i].start_pfn < start_pfn &&
4042                    early_node_map[i].end_pfn > start_pfn) {
4043                        unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4044                        early_node_map[i].end_pfn = start_pfn;
4045                        if (temp_end_pfn > end_pfn)
4046                                add_active_range(nid, end_pfn, temp_end_pfn);
4047                        continue;
4048                }
4049                if (early_node_map[i].start_pfn >= start_pfn &&
4050                    early_node_map[i].end_pfn > end_pfn &&
4051                    early_node_map[i].start_pfn < end_pfn) {
4052                        early_node_map[i].start_pfn = end_pfn;
4053                        continue;
4054                }
4055        }
4056
4057        if (!removed)
4058                return;
4059
4060        /* remove the blank ones */
4061        for (i = nr_nodemap_entries - 1; i > 0; i--) {
4062                if (early_node_map[i].nid != nid)
4063                        continue;
4064                if (early_node_map[i].end_pfn)
4065                        continue;
4066                /* we found it, get rid of it */
4067                for (j = i; j < nr_nodemap_entries - 1; j++)
4068                        memcpy(&early_node_map[j], &early_node_map[j+1],
4069                                sizeof(early_node_map[j]));
4070                j = nr_nodemap_entries - 1;
4071                memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4072                nr_nodemap_entries--;
4073        }
4074}
4075
4076/**
4077 * remove_all_active_ranges - Remove all currently registered regions
4078 *
4079 * During discovery, it may be found that a table like SRAT is invalid
4080 * and an alternative discovery method must be used. This function removes
4081 * all currently registered regions.
4082 */
4083void __init remove_all_active_ranges(void)
4084{
4085        memset(early_node_map, 0, sizeof(early_node_map));
4086        nr_nodemap_entries = 0;
4087}
4088
4089/* Compare two active node_active_regions */
4090static int __init cmp_node_active_region(const void *a, const void *b)
4091{
4092        struct node_active_region *arange = (struct node_active_region *)a;
4093        struct node_active_region *brange = (struct node_active_region *)b;
4094
4095        /* Done this way to avoid overflows */
4096        if (arange->start_pfn > brange->start_pfn)
4097                return 1;
4098        if (arange->start_pfn < brange->start_pfn)
4099                return -1;
4100
4101        return 0;
4102}
4103
4104/* sort the node_map by start_pfn */
4105static void __init sort_node_map(void)
4106{
4107        sort(early_node_map, (size_t)nr_nodemap_entries,
4108                        sizeof(struct node_active_region),
4109                        cmp_node_active_region, NULL);
4110}
4111
4112/* Find the lowest pfn for a node */
4113static unsigned long __init find_min_pfn_for_node(int nid)
4114{
4115        int i;
4116        unsigned long min_pfn = ULONG_MAX;
4117
4118        /* Assuming a sorted map, the first range found has the starting pfn */
4119        for_each_active_range_index_in_nid(i, nid)
4120                min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4121
4122        if (min_pfn == ULONG_MAX) {
4123                printk(KERN_WARNING
4124                        "Could not find start_pfn for node %d\n", nid);
4125                return 0;
4126        }
4127
4128        return min_pfn;
4129}
4130
4131/**
4132 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4133 *
4134 * It returns the minimum PFN based on information provided via
4135 * add_active_range().
4136 */
4137unsigned long __init find_min_pfn_with_active_regions(void)
4138{
4139        return find_min_pfn_for_node(MAX_NUMNODES);
4140}
4141
4142/*
4143 * early_calculate_totalpages()
4144 * Sum pages in active regions for movable zone.
4145 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4146 */
4147static unsigned long __init early_calculate_totalpages(void)
4148{
4149        int i;
4150        unsigned long totalpages = 0;
4151
4152        for (i = 0; i < nr_nodemap_entries; i++) {
4153                unsigned long pages = early_node_map[i].end_pfn -
4154                                                early_node_map[i].start_pfn;
4155                totalpages += pages;
4156                if (pages)
4157                        node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4158        }
4159        return totalpages;
4160}
4161
4162/*
4163 * Find the PFN at which the Movable zone begins in each node. Kernel memory
4164 * is spread evenly between nodes as long as the nodes have enough
4165 * memory. When they don't, some nodes will have more kernelcore than
4166 * others
4167 */
4168static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4169{
4170        int i, nid;
4171        unsigned long usable_startpfn;
4172        unsigned long kernelcore_node, kernelcore_remaining;
4173        /* save the state before borrow the nodemask */
4174        nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4175        unsigned long totalpages = early_calculate_totalpages();
4176        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4177
4178        /*
4179         * If movablecore was specified, calculate the corresponding amount
4180         * of kernelcore so that memory usable for any allocation type is
4181         * evenly spread.  If both kernelcore and movablecore are
4182         * specified, then the kernelcore value is used for
4183         * required_kernelcore when it is greater than what movablecore
4184         * would have allowed.
4185         */
4186        if (required_movablecore) {
4187                unsigned long corepages;
4188
4189                /*
4190                 * Round-up so that ZONE_MOVABLE is at least as large as what
4191                 * was requested by the user
4192                 */
4193                required_movablecore =
4194                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4195                corepages = totalpages - required_movablecore;
4196
4197                required_kernelcore = max(required_kernelcore, corepages);
4198        }
4199
4200        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4201        if (!required_kernelcore)
4202                goto out;
4203
4204        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4205        find_usable_zone_for_movable();
4206        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4207
4208restart:
4209        /* Spread kernelcore memory as evenly as possible throughout nodes */
4210        kernelcore_node = required_kernelcore / usable_nodes;
4211        for_each_node_state(nid, N_HIGH_MEMORY) {
4212                /*
4213                 * Recalculate kernelcore_node if the division per node
4214                 * now exceeds what is necessary to satisfy the requested
4215                 * amount of memory for the kernel
4216                 */
4217                if (required_kernelcore < kernelcore_node)
4218                        kernelcore_node = required_kernelcore / usable_nodes;
4219
4220                /*
4221                 * As the map is walked, we track how much memory is usable
4222                 * by the kernel using kernelcore_remaining. When it is
4223                 * 0, the rest of the node is usable by ZONE_MOVABLE
4224                 */
4225                kernelcore_remaining = kernelcore_node;
4226
4227                /* Go through each range of PFNs within this node */
4228                for_each_active_range_index_in_nid(i, nid) {
4229                        unsigned long start_pfn, end_pfn;
4230                        unsigned long size_pages;
4231
4232                        start_pfn = max(early_node_map[i].start_pfn,
4233                                                zone_movable_pfn[nid]);
4234                        end_pfn = early_node_map[i].end_pfn;
4235                        if (start_pfn >= end_pfn)
4236                                continue;
4237
4238                        /* Account for what is only usable for kernelcore */
4239                        if (start_pfn < usable_startpfn) {
4240                                unsigned long kernel_pages;
4241                                kernel_pages = min(end_pfn, usable_startpfn)
4242                                                                - start_pfn;
4243
4244                                kernelcore_remaining -= min(kernel_pages,
4245                                                        kernelcore_remaining);
4246                                required_kernelcore -= min(kernel_pages,
4247                                                        required_kernelcore);
4248
4249                                /* Continue if range is now fully accounted */
4250                                if (end_pfn <= usable_startpfn) {
4251
4252                                        /*
4253                                         * Push zone_movable_pfn to the end so
4254                                         * that if we have to rebalance
4255                                         * kernelcore across nodes, we will
4256                                         * not double account here
4257                                         */
4258                                        zone_movable_pfn[nid] = end_pfn;
4259                                        continue;
4260                                }
4261                                start_pfn = usable_startpfn;
4262                        }
4263
4264                        /*
4265                         * The usable PFN range for ZONE_MOVABLE is from
4266                         * start_pfn->end_pfn. Calculate size_pages as the
4267                         * number of pages used as kernelcore
4268                         */
4269                        size_pages = end_pfn - start_pfn;
4270                        if (size_pages > kernelcore_remaining)
4271                                size_pages = kernelcore_remaining;
4272                        zone_movable_pfn[nid] = start_pfn + size_pages;
4273
4274                        /*
4275                         * Some kernelcore has been met, update counts and
4276                         * break if the kernelcore for this node has been
4277                         * satisfied
4278                         */
4279                        required_kernelcore -= min(required_kernelcore,
4280                                                                size_pages);
4281                        kernelcore_remaining -= size_pages;
4282                        if (!kernelcore_remaining)
4283                                break;
4284                }
4285        }
4286
4287        /*
4288         * If there is still required_kernelcore, we do another pass with one
4289         * less node in the count. This will push zone_movable_pfn[nid] further
4290         * along on the nodes that still have memory until kernelcore is
4291 * satisfied
4292         */
4293        usable_nodes--;
4294        if (usable_nodes && required_kernelcore > usable_nodes)
4295                goto restart;
4296
4297        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4298        for (nid = 0; nid < MAX_NUMNODES; nid++)
4299                zone_movable_pfn[nid] =
4300                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4301
4302out:
4303        /* restore the node_state */
4304        node_states[N_HIGH_MEMORY] = saved_node_state;
4305}
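
/*
 * Editor's example for find_zone_movable_pfns_for_nodes() (hypothetical
 * layout): on a two-node machine with 1GB per node and kernelcore=1G on
 * the command line, required_kernelcore is 262144 pages, so each node is
 * asked for 131072 pages of kernelcore and zone_movable_pfn[] lands
 * roughly half way through each node; the upper half of every node then
 * becomes ZONE_MOVABLE.  If one node were too small to supply its share,
 * the restart pass above would push the shortfall onto the other node.
 */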
4306
4307/* Any regular memory on that node? */
4308static void check_for_regular_memory(pg_data_t *pgdat)
4309{
4310#ifdef CONFIG_HIGHMEM
4311        enum zone_type zone_type;
4312
4313        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4314                struct zone *zone = &pgdat->node_zones[zone_type];
4315                if (zone->present_pages)
4316                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4317        }
4318#endif
4319}
4320
4321/**
4322 * free_area_init_nodes - Initialise all pg_data_t and zone data
4323 * @max_zone_pfn: an array of max PFNs for each zone
4324 *
4325 * This will call free_area_init_node() for each active node in the system.
4326 * Using the page ranges provided by add_active_range(), the size of each
4327 * zone in each node and their holes are calculated. If the maximum PFNs
4328 * of two adjacent zones match, the higher zone is assumed to be empty.
4329 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4330 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4331 * starts where the previous one ended. For example, ZONE_DMA32 starts
4332 * at arch_max_dma_pfn.
4333 */
4334void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4335{
4336        unsigned long nid;
4337        int i;
4338
4339        /* Sort early_node_map as initialisation assumes it is sorted */
4340        sort_node_map();
4341
4342        /* Record where the zone boundaries are */
4343        memset(arch_zone_lowest_possible_pfn, 0,
4344                                sizeof(arch_zone_lowest_possible_pfn));
4345        memset(arch_zone_highest_possible_pfn, 0,
4346                                sizeof(arch_zone_highest_possible_pfn));
4347        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4348        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4349        for (i = 1; i < MAX_NR_ZONES; i++) {
4350                if (i == ZONE_MOVABLE)
4351                        continue;
4352                arch_zone_lowest_possible_pfn[i] =
4353                        arch_zone_highest_possible_pfn[i-1];
4354                arch_zone_highest_possible_pfn[i] =
4355                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4356        }
4357        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4358        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4359
4360        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4361        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4362        find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4363
4364        /* Print out the zone ranges */
4365        printk("Zone PFN ranges:\n");
4366        for (i = 0; i < MAX_NR_ZONES; i++) {
4367                if (i == ZONE_MOVABLE)
4368                        continue;
4369                printk("  %-8s %0#10lx -> %0#10lx\n",
4370                                zone_names[i],
4371                                arch_zone_lowest_possible_pfn[i],
4372                                arch_zone_highest_possible_pfn[i]);
4373        }
4374
4375        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4376        printk("Movable zone start PFN for each node\n");
4377        for (i = 0; i < MAX_NUMNODES; i++) {
4378                if (zone_movable_pfn[i])
4379                        printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4380        }
4381
4382        /* Print out the early_node_map[] */
4383        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4384        for (i = 0; i < nr_nodemap_entries; i++)
4385                printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4386                                                early_node_map[i].start_pfn,
4387                                                early_node_map[i].end_pfn);
4388
4389        /* Initialise every node */
4390        mminit_verify_pageflags_layout();
4391        setup_nr_node_ids();
4392        for_each_online_node(nid) {
4393                pg_data_t *pgdat = NODE_DATA(nid);
4394                free_area_init_node(nid, NULL,
4395                                find_min_pfn_for_node(nid), NULL);
4396
4397                /* Any memory on that node? */
4398                if (pgdat->node_present_pages)
4399                        node_set_state(nid, N_HIGH_MEMORY);
4400                check_for_regular_memory(pgdat);
4401        }
4402}
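
/*
 * Rough sketch of the expected caller (not taken from this file): an
 * architecture fills in the maximum PFN of each zone and hands the
 * array over.  ZONE_DMA and max_low_pfn are arch/config dependent and
 * the boundary values here are purely illustrative:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;   (e.g. first 16MB)
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;   (end of lowmem)
 *	free_area_init_nodes(max_zone_pfns);
 *
 * Any zone whose max PFN is left equal to the previous zone's is
 * treated as empty, as described in the comment above.
 */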
4403
4404static int __init cmdline_parse_core(char *p, unsigned long *core)
4405{
4406        unsigned long long coremem;
4407        if (!p)
4408                return -EINVAL;
4409
4410        coremem = memparse(p, &p);
4411        *core = coremem >> PAGE_SHIFT;
4412
4413        /* Paranoid check that UL is enough for the coremem value */
4414        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4415
4416        return 0;
4417}
4418
4419/*
4420 * kernelcore=size sets the amount of memory for use for allocations that
4421 * cannot be reclaimed or migrated.
4422 */
4423static int __init cmdline_parse_kernelcore(char *p)
4424{
4425        return cmdline_parse_core(p, &required_kernelcore);
4426}
4427
4428/*
4429 * movablecore=size sets the amount of memory for use for allocations that
4430 * can be reclaimed or migrated.
4431 */
4432static int __init cmdline_parse_movablecore(char *p)
4433{
4434        return cmdline_parse_core(p, &required_movablecore);
4435}
4436
4437early_param("kernelcore", cmdline_parse_kernelcore);
4438early_param("movablecore", cmdline_parse_movablecore);
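
/*
 * Example, assuming 4K pages and made-up sizes: booting with
 * "kernelcore=512M" asks for 131072 pages that may never end up in
 * ZONE_MOVABLE, while "movablecore=2G" sizes ZONE_MOVABLE directly and
 * the kernelcore portion is then derived from whatever remains.
 */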
4439
4440#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4441
4442/**
4443 * set_dma_reserve - set the specified number of pages reserved in the first zone
4444 * @new_dma_reserve: The number of pages to mark reserved
4445 *
4446 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4447 * In the DMA zone, a significant percentage may be consumed by kernel image
4448 * and other unfreeable allocations which can skew the watermarks badly. This
4449 * function may optionally be used to account for unfreeable pages in the
4450 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4451 * smaller per-cpu batchsize.
4452 */
4453void __init set_dma_reserve(unsigned long new_dma_reserve)
4454{
4455        dma_reserve = new_dma_reserve;
4456}
4457
4458#ifndef CONFIG_NEED_MULTIPLE_NODES
4459struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4460EXPORT_SYMBOL(contig_page_data);
4461#endif
4462
4463void __init free_area_init(unsigned long *zones_size)
4464{
4465        free_area_init_node(0, zones_size,
4466                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4467}
4468
4469static int page_alloc_cpu_notify(struct notifier_block *self,
4470                                 unsigned long action, void *hcpu)
4471{
4472        int cpu = (unsigned long)hcpu;
4473
4474        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4475                drain_pages(cpu);
4476
4477                /*
4478                 * Spill the event counters of the dead processor
4479                 * into the current processor's event counters.
4480                 * This artificially elevates the count of the current
4481                 * processor.
4482                 */
4483                vm_events_fold_cpu(cpu);
4484
4485                /*
4486                 * Zero the differential counters of the dead processor
4487                 * so that the vm statistics are consistent.
4488                 *
4489                 * This is only okay since the processor is dead and cannot
4490                 * race with what we are doing.
4491                 */
4492                refresh_cpu_vm_stats(cpu);
4493        }
4494        return NOTIFY_OK;
4495}
4496
4497void __init page_alloc_init(void)
4498{
4499        hotcpu_notifier(page_alloc_cpu_notify, 0);
4500}
4501
4502/*
4503 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4504 *      or min_free_kbytes changes.
4505 */
4506static void calculate_totalreserve_pages(void)
4507{
4508        struct pglist_data *pgdat;
4509        unsigned long reserve_pages = 0;
4510        enum zone_type i, j;
4511
4512        for_each_online_pgdat(pgdat) {
4513                for (i = 0; i < MAX_NR_ZONES; i++) {
4514                        struct zone *zone = pgdat->node_zones + i;
4515                        unsigned long max = 0;
4516
4517                        /* Find valid and maximum lowmem_reserve in the zone */
4518                        for (j = i; j < MAX_NR_ZONES; j++) {
4519                                if (zone->lowmem_reserve[j] > max)
4520                                        max = zone->lowmem_reserve[j];
4521                        }
4522
4523                        /* we treat the high watermark as reserved pages. */
4524                        max += high_wmark_pages(zone);
4525
4526                        if (max > zone->present_pages)
4527                                max = zone->present_pages;
4528                        reserve_pages += max;
4529                }
4530        }
4531        totalreserve_pages = reserve_pages;
4532}
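
/*
 * Worked example with made-up numbers: a zone with present_pages =
 * 262144, a high watermark of 1536 pages and lowmem_reserve[] =
 * { 0, 784, 1024 } contributes max(0, 784, 1024) + 1536 = 2560 pages
 * to totalreserve_pages (the sum is capped at present_pages).
 */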
4533
4534/*
4535 * setup_per_zone_lowmem_reserve - called whenever
4536 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4537 *      has a correct pages reserved value, so an adequate number of
4538 *      pages are left in the zone after a successful __alloc_pages().
4539 */
4540static void setup_per_zone_lowmem_reserve(void)
4541{
4542        struct pglist_data *pgdat;
4543        enum zone_type j, idx;
4544
4545        for_each_online_pgdat(pgdat) {
4546                for (j = 0; j < MAX_NR_ZONES; j++) {
4547                        struct zone *zone = pgdat->node_zones + j;
4548                        unsigned long present_pages = zone->present_pages;
4549
4550                        zone->lowmem_reserve[j] = 0;
4551
4552                        idx = j;
4553                        while (idx) {
4554                                struct zone *lower_zone;
4555
4556                                idx--;
4557
4558                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
4559                                        sysctl_lowmem_reserve_ratio[idx] = 1;
4560
4561                                lower_zone = pgdat->node_zones + idx;
4562                                lower_zone->lowmem_reserve[j] = present_pages /
4563                                        sysctl_lowmem_reserve_ratio[idx];
4564                                present_pages += lower_zone->present_pages;
4565                        }
4566                }
4567        }
4568
4569        /* update totalreserve_pages */
4570        calculate_totalreserve_pages();
4571}
4572
4573/**
4574 * setup_per_zone_wmarks - called when min_free_kbytes changes
4575 * or when memory is hot-{added|removed}
4576 *
4577 * Ensures that the watermark[min,low,high] values for each zone are set
4578 * correctly with respect to min_free_kbytes.
4579 */
4580void setup_per_zone_wmarks(void)
4581{
4582        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4583        unsigned long lowmem_pages = 0;
4584        struct zone *zone;
4585        unsigned long flags;
4586
4587        /* Calculate total number of !ZONE_HIGHMEM pages */
4588        for_each_zone(zone) {
4589                if (!is_highmem(zone))
4590                        lowmem_pages += zone->present_pages;
4591        }
4592
4593        for_each_zone(zone) {
4594                u64 tmp;
4595
4596                spin_lock_irqsave(&zone->lock, flags);
4597                tmp = (u64)pages_min * zone->present_pages;
4598                do_div(tmp, lowmem_pages);
4599                if (is_highmem(zone)) {
4600                        /*
4601                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4602                         * need highmem pages, so cap pages_min to a small
4603                         * value here.
4604                         *
4605                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4606                         * deltas control async page reclaim, and so should
4607                         * not be capped for highmem.
4608                         */
4609                        int min_pages;
4610
4611                        min_pages = zone->present_pages / 1024;
4612                        if (min_pages < SWAP_CLUSTER_MAX)
4613                                min_pages = SWAP_CLUSTER_MAX;
4614                        if (min_pages > 128)
4615                                min_pages = 128;
4616                        zone->watermark[WMARK_MIN] = min_pages;
4617                } else {
4618                        /*
4619                         * If it's a lowmem zone, reserve a number of pages
4620                         * proportionate to the zone's size.
4621                         */
4622                        zone->watermark[WMARK_MIN] = tmp;
4623                }
4624
4625                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
4626                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4627                setup_zone_migrate_reserve(zone);
4628                spin_unlock_irqrestore(&zone->lock, flags);
4629        }
4630
4631        /* update totalreserve_pages */
4632        calculate_totalreserve_pages();
4633}
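
/*
 * Worked example, assuming PAGE_SHIFT == 12 and a single lowmem zone
 * holding all of lowmem: min_free_kbytes = 4096 gives pages_min =
 * 4096 >> 2 = 1024 pages, hence WMARK_MIN = 1024, WMARK_LOW =
 * 1024 + (1024 >> 2) = 1280 and WMARK_HIGH = 1024 + (1024 >> 1) = 1536.
 */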
4634
4635/*
4636 * The inactive anon list should be small enough that the VM never has to
4637 * do too much work, but large enough that each inactive page has a chance
4638 * to be referenced again before it is swapped out.
4639 *
4640 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4641 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4642 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4643 * the anonymous pages are kept on the inactive list.
4644 *
4645 * total     target    max
4646 * memory    ratio     inactive anon
4647 * -------------------------------------
4648 *   10MB       1         5MB
4649 *  100MB       1        50MB
4650 *    1GB       3       250MB
4651 *   10GB      10       0.9GB
4652 *  100GB      31         3GB
4653 *    1TB     101        10GB
4654 *   10TB     320        32GB
4655 */
4656void calculate_zone_inactive_ratio(struct zone *zone)
4657{
4658        unsigned int gb, ratio;
4659
4660        /* Zone size in gigabytes */
4661        gb = zone->present_pages >> (30 - PAGE_SHIFT);
4662        if (gb)
4663                ratio = int_sqrt(10 * gb);
4664        else
4665                ratio = 1;
4666
4667        zone->inactive_ratio = ratio;
4668}
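
/*
 * Example, assuming 4K pages: a 4GB zone has gb = 4, so inactive_ratio
 * = int_sqrt(40) = 6, i.e. roughly 1/7 of the anonymous pages are kept
 * on the inactive list; zones smaller than 1GB fall back to a ratio
 * of 1.
 */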
4669
4670static void __init setup_per_zone_inactive_ratio(void)
4671{
4672        struct zone *zone;
4673
4674        for_each_zone(zone)
4675                calculate_zone_inactive_ratio(zone);
4676}
4677
4678/*
4679 * Initialise min_free_kbytes.
4680 *
4681 * For small machines we want it small (128k min).  For large machines
4682 * we want it large (64MB max).  But it is not linear, because network
4683 * bandwidth does not increase linearly with machine size.  We use
4684 *
4685 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4686 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
4687 *
4688 * which yields
4689 *
4690 * 16MB:        512k
4691 * 32MB:        724k
4692 * 64MB:        1024k
4693 * 128MB:       1448k
4694 * 256MB:       2048k
4695 * 512MB:       2896k
4696 * 1024MB:      4096k
4697 * 2048MB:      5792k
4698 * 4096MB:      8192k
4699 * 8192MB:      11584k
4700 * 16384MB:     16384k
4701 */
4702static int __init init_per_zone_wmark_min(void)
4703{
4704        unsigned long lowmem_kbytes;
4705
4706        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4707
4708        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4709        if (min_free_kbytes < 128)
4710                min_free_kbytes = 128;
4711        if (min_free_kbytes > 65536)
4712                min_free_kbytes = 65536;
4713        setup_per_zone_wmarks();
4714        setup_per_zone_lowmem_reserve();
4715        setup_per_zone_inactive_ratio();
4716        return 0;
4717}
4718module_init(init_per_zone_wmark_min)
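
/*
 * Worked example, assuming 4K pages: roughly 2048MB of lowmem is
 * 524288 free buffer pages, so lowmem_kbytes = 2097152 and
 * min_free_kbytes = int_sqrt(2097152 * 16) = 5792, matching the
 * 2048MB row of the table above.
 */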
4719
4720/*
4721 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec()
4722 *      so that we can recompute the zone watermarks whenever
4723 *      min_free_kbytes changes.
4724 */
4725int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
4726        void __user *buffer, size_t *length, loff_t *ppos)
4727{
4728        proc_dointvec(table, write, buffer, length, ppos);
4729        if (write)
4730                setup_per_zone_wmarks();
4731        return 0;
4732}
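
/*
 * This handler sits behind /proc/sys/vm/min_free_kbytes, so for
 * example "echo 65536 > /proc/sys/vm/min_free_kbytes" immediately
 * recomputes every zone's min/low/high watermarks via
 * setup_per_zone_wmarks().
 */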
4733
4734#ifdef CONFIG_NUMA
4735int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4736        void __user *buffer, size_t *length, loff_t *ppos)
4737{
4738        struct zone *zone;
4739        int rc;
4740
4741        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4742        if (rc)
4743                return rc;
4744
4745        for_each_zone(zone)
4746                zone->min_unmapped_pages = (zone->present_pages *
4747                                sysctl_min_unmapped_ratio) / 100;
4748        return 0;
4749}
4750
4751int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4752        void __user *buffer, size_t *length, loff_t *ppos)
4753{
4754        struct zone *zone;
4755        int rc;
4756
4757        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
4758        if (rc)
4759                return rc;
4760
4761        for_each_zone(zone)
4762                zone->min_slab_pages = (zone->present_pages *
4763                                sysctl_min_slab_ratio) / 100;
4764        return 0;
4765}
4766#endif
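
/*
 * Example with illustrative numbers: writing 1 to
 * /proc/sys/vm/min_unmapped_ratio makes a zone with 262144 present
 * pages use min_unmapped_pages = 262144 * 1 / 100 = 2621; the
 * min_slab_ratio handler above scales zone->min_slab_pages the same
 * way.
 */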
4767
4768/*
4769 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4770 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4771 *      whenever sysctl_lowmem_reserve_ratio changes.
4772 *
4773 * The reserve ratio has no relation to the minimum watermarks. The
4774 * lowmem reserve ratio is only meaningful as a function of the
4775 * boot-time zone sizes.
4776 */
4777int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4778        void __user *buffer, size_t *length, loff_t *ppos)
4779{
4780        proc_dointvec_minmax(table, write, buffer, length, ppos);
4781        setup_per_zone_lowmem_reserve();
4782        return 0;
4783}
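
/*
 * The ratios live in /proc/sys/vm/lowmem_reserve_ratio (one value per
 * protectable lower zone), so writing new ratios there makes this
 * handler re-derive every zone->lowmem_reserve[] entry from the
 * boot-time zone sizes.
 */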
4784
4785/*
4786 * percpu_pagelist_fraction - changes pcp->high for each zone on each cpu.
4787 * It is the fraction of the total pages in a zone that a hot per-cpu
4788 * pagelist can hold before it is flushed back to the buddy allocator.
4789 */
4790
4791int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4792        void __user *buffer, size_t *length, loff_t *ppos)
4793{
4794        struct zone *zone;
4795        unsigned int cpu;
4796        int ret;
4797
4798        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
4799        if (!write || (ret == -EINVAL))
4800                return ret;
4801        for_each_populated_zone(zone) {
4802                for_each_online_cpu(cpu) {
4803                        unsigned long  high;
4804                        high = zone->present_pages / percpu_pagelist_fraction;
4805                        setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4806                }
4807        }
4808        return 0;
4809}
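
/*
 * Example with illustrative numbers: writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction makes a zone with 262144
 * present pages set each per-cpu list's high mark to
 * 262144 / 8 = 32768 pages.
 */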
4810
4811int hashdist = HASHDIST_DEFAULT;
4812
4813#ifdef CONFIG_NUMA
4814static int __init set_hashdist(char *str)
4815{
4816        if (!str)
4817                return 0;
4818        hashdist = simple_strtoul(str, &str, 0);
4819        return 1;
4820}
4821__setup("hashdist=", set_hashdist);
4822#endif
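
/*
 * Booting with "hashdist=0" disables this, so the large boot-time
 * hashes below come straight from the page allocator rather than being
 * spread across NUMA nodes through vmalloc().
 */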
4823
4824/*
4825 * allocate a large system hash table from bootmem
4826 * - it is assumed that the hash table must contain an exact power-of-2
4827 *   quantity of entries
4828 * - limit is the number of hash buckets, not the total allocation size
4829 */
4830void *__init alloc_large_system_hash(const char *tablename,
4831                                     unsigned long bucketsize,
4832                                     unsigned long numentries,
4833                                     int scale,
4834                                     int flags,
4835                                     unsigned int *_hash_shift,
4836                                     unsigned int *_hash_mask,
4837                                     unsigned long limit)
4838{
4839        unsigned long long max = limit;
4840        unsigned long log2qty, size;
4841        void *table = NULL;
4842
4843        /* allow the kernel cmdline to have a say */
4844        if (!numentries) {
4845                /* round applicable memory size up to nearest megabyte */
4846                numentries = nr_kernel_pages;
4847                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4848                numentries >>= 20 - PAGE_SHIFT;
4849                numentries <<= 20 - PAGE_SHIFT;
4850
4851                /* limit to 1 bucket per 2^scale bytes of low memory */
4852                if (scale > PAGE_SHIFT)
4853                        numentries >>= (scale - PAGE_SHIFT);
4854                else
4855                        numentries <<= (PAGE_SHIFT - scale);
4856
4857                /* Make sure we've got at least a 0-order allocation.. */
4858                if (unlikely(flags & HASH_SMALL)) {
4859                        /* Makes no sense without HASH_EARLY */
4860                        WARN_ON(!(flags & HASH_EARLY));
4861                        if (!(numentries >> *_hash_shift)) {
4862                                numentries = 1UL << *_hash_shift;
4863                                BUG_ON(!numentries);
4864                        }
4865                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4866                        numentries = PAGE_SIZE / bucketsize;
4867        }
4868        numentries = roundup_pow_of_two(numentries);
4869
4870        /* limit allocation size to 1/16 total memory by default */
4871        if (max == 0) {
4872                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4873                do_div(max, bucketsize);
4874        }
4875
4876        if (numentries > max)
4877                numentries = max;
4878
4879        log2qty = ilog2(numentries);
4880
4881        do {
4882                size = bucketsize << log2qty;
4883                if (flags & HASH_EARLY)
4884                        table = alloc_bootmem_nopanic(size);
4885                else if (hashdist)
4886                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4887                else {
4888                        /*
4889                         * If bucketsize is not a power of two, some pages
4890                         * at the end of the hash table can be freed;
4891                         * alloc_pages_exact() does that automatically.
4892                         */
4893                        if (get_order(size) < MAX_ORDER) {
4894                                table = alloc_pages_exact(size, GFP_ATOMIC);
4895                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
4896                        }
4897                }
4898        } while (!table && size > PAGE_SIZE && --log2qty);
4899
4900        if (!table)
4901                panic("Failed to allocate %s hash table\n", tablename);
4902
4903        printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4904               tablename,
4905               (1U << log2qty),
4906               ilog2(size) - PAGE_SHIFT,
4907               size);
4908
4909        if (_hash_shift)
4910                *_hash_shift = log2qty;
4911        if (_hash_mask)
4912                *_hash_mask = (1 << log2qty) - 1;
4913
4914        return table;
4915}
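
/*
 * Worked example with hypothetical inputs: numentries == 0,
 * nr_kernel_pages == 262144 (1GB of 4K pages), bucketsize == 8 and
 * scale == 14.  numentries becomes 262144 >> (14 - 12) = 65536, which
 * is already a power of two, so log2qty = 16, the table is
 * 8 << 16 bytes = 512KB, *_hash_shift is set to 16 and *_hash_mask
 * to 0xffff.
 */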
4916
4917/* Return a pointer to the bitmap storing bits affecting a block of pages */
4918static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4919                                                        unsigned long pfn)
4920{
4921#ifdef CONFIG_SPARSEMEM
4922        return __pfn_to_section(pfn)->pageblock_flags;
4923#else
4924        return zone->pageblock_flags;
4925#endif /* CONFIG_SPARSEMEM */
4926}
4927
4928static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4929{
4930#ifdef CONFIG_SPARSEMEM
4931        pfn &= (PAGES_PER_SECTION-1);
4932        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4933#else
4934        pfn = pfn - zone->zone_start_pfn;
4935        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4936#endif /* CONFIG_SPARSEMEM */
4937}
4938
4939/**
4940 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4941 * @page: The page within the block of interest
4942 * @start_bitidx: The first bit of interest to retrieve
4943 * @end_bitidx: The last bit of interest
4944 * returns pageblock_bits flags
4945 */
4946unsigned long get_pageblock_flags_group(struct page *page,
4947                                        int start_bitidx, int end_bitidx)
4948{
4949        struct zone *zone;
4950        unsigned long *bitmap;
4951        unsigned long pfn, bitidx;
4952        unsigned long flags = 0;
4953        unsigned long value = 1;
4954
4955        zone = page_zone(page);
4956        pfn = page_to_pfn(page);
4957        bitmap = get_pageblock_bitmap(zone, pfn);
4958        bitidx = pfn_to_bitidx(zone, pfn);
4959
4960        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4961                if (test_bit(bitidx + start_bitidx, bitmap))
4962                        flags |= value;
4963
4964        return flags;
4965}
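
/*
 * Example, assuming !CONFIG_SPARSEMEM and an illustrative
 * pageblock_order of 10: a page at pfn 5120 in a zone starting at
 * pfn 0 lies in pageblock 5120 >> 10 = 5, so its flags begin at bit
 * 5 * NR_PAGEBLOCK_BITS of zone->pageblock_flags and the loops here
 * read or write bits start_bitidx..end_bitidx within that group.
 */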
4966
4967/**
4968 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4969 * @page: The page within the block of interest
4970 * @start_bitidx: The first bit of interest
4971 * @end_bitidx: The last bit of interest
4972 * @flags: The flags to set
4973 */
4974void set_pageblock_flags_group(struct page *page, unsigned long flags,
4975                                        int start_bitidx, int end_bitidx)
4976{
4977        struct zone *zone;
4978        unsigned long *bitmap;
4979        unsigned long pfn, bitidx;
4980        unsigned long value = 1;
4981
4982        zone = page_zone(page);
4983        pfn = page_to_pfn(page);
4984        bitmap = get_pageblock_bitmap(zone, pfn);
4985        bitidx = pfn_to_bitidx(zone, pfn);
4986        VM_BUG_ON(pfn < zone->zone_start_pfn);
4987        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4988
4989        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4990                if (flags & value)
4991                        __set_bit(bitidx + start_bitidx, bitmap);
4992                else
4993                        __clear_bit(bitidx + start_bitidx, bitmap);
4994}
4995
4996/*
4997 * These are helpers for page_isolation.c; see that file as well.
4998 * They set/clear a pageblock's migratetype to/from MIGRATE_ISOLATE.
4999 * The page allocator never allocates memory from an ISOLATE pageblock.
5000 */
5001
5002int set_migratetype_isolate(struct page *page)
5003{
5004        struct zone *zone;
5005        unsigned long flags;
5006        int ret = -EBUSY;
5007        int zone_idx;
5008
5009        zone = page_zone(page);
5010        zone_idx = zone_idx(zone);
5011        spin_lock_irqsave(&zone->lock, flags);
5012        /*
5013         * In the future, more migrate types will be eligible as isolation targets.
5014         */
5015        if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
5016            zone_idx != ZONE_MOVABLE)
5017                goto out;
5018        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5019        move_freepages_block(zone, page, MIGRATE_ISOLATE);
5020        ret = 0;
5021out:
5022        spin_unlock_irqrestore(&zone->lock, flags);
5023        if (!ret)
5024                drain_all_pages();
5025        return ret;
5026}
5027
5028void unset_migratetype_isolate(struct page *page)
5029{
5030        struct zone *zone;
5031        unsigned long flags;
5032        zone = page_zone(page);
5033        spin_lock_irqsave(&zone->lock, flags);
5034        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5035                goto out;
5036        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5037        move_freepages_block(zone, page, MIGRATE_MOVABLE);
5038out:
5039        spin_unlock_irqrestore(&zone->lock, flags);
5040}
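
/*
 * Simplified sketch of how an offlining path might use the pair above
 * (names in the snippet are hypothetical):
 *
 *	if (set_migratetype_isolate(page))
 *		return -EBUSY;			(block was not MOVABLE)
 *	... migrate everything out of the pageblock ...
 *	if (migration_failed)
 *		unset_migratetype_isolate(page);
 *
 * Isolation keeps the allocator from refilling the block while its
 * pages are being drained.
 */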
5041
5042#ifdef CONFIG_MEMORY_HOTREMOVE
5043/*
5044 * All pages in the range must be isolated before calling this.
5045 */
5046void
5047__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5048{
5049        struct page *page;
5050        struct zone *zone;
5051        int order, i;
5052        unsigned long pfn;
5053        unsigned long flags;
5054        /* find the first valid pfn */
5055        for (pfn = start_pfn; pfn < end_pfn; pfn++)
5056                if (pfn_valid(pfn))
5057                        break;
5058        if (pfn == end_pfn)
5059                return;
5060        zone = page_zone(pfn_to_page(pfn));
5061        spin_lock_irqsave(&zone->lock, flags);
5062        pfn = start_pfn;
5063        while (pfn < end_pfn) {
5064                if (!pfn_valid(pfn)) {
5065                        pfn++;
5066                        continue;
5067                }
5068                page = pfn_to_page(pfn);
5069                BUG_ON(page_count(page));
5070                BUG_ON(!PageBuddy(page));
5071                order = page_order(page);
5072#ifdef CONFIG_DEBUG_VM
5073                printk(KERN_INFO "remove from free list %lx %d %lx\n",
5074                       pfn, 1 << order, end_pfn);
5075#endif
5076                list_del(&page->lru);
5077                rmv_page_order(page);
5078                zone->free_area[order].nr_free--;
5079                __mod_zone_page_state(zone, NR_FREE_PAGES,
5080                                      - (1UL << order));
5081                for (i = 0; i < (1 << order); i++)
5082                        SetPageReserved((page+i));
5083                pfn += (1 << order);
5084        }
5085        spin_unlock_irqrestore(&zone->lock, flags);
5086}
5087#endif
5088