linux/mm/page_alloc.c
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
         256,
#endif
#ifdef CONFIG_ZONE_DMA32
         256,
#endif
#ifdef CONFIG_HIGHMEM
         32,
#endif
         32,
};
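
/*
 * Worked example of the ratios above (a standalone userspace sketch,
 * not kernel code; demo_lowmem_reserve and the zone sizes are
 * illustrative). The reserve kept in a zone is roughly the memory of
 * the zones above it divided by the protected zone's ratio, so with
 * the default ratio of 32 a 224M ZONE_HIGHMEM leaves 224M/32 = 7M
 * reserved in ZONE_NORMAL, matching the comment above.
 */
#if 0
#include <stdio.h>

static unsigned long demo_lowmem_reserve(unsigned long higher_zone_pages,
                                         int ratio)
{
        return higher_zone_pages / ratio;
}

int main(void)
{
        unsigned long highmem_pages = (224UL << 20) / 4096; /* 224M of 4K pages */
        unsigned long reserve = demo_lowmem_reserve(highmem_pages, 32);

        printf("reserved in ZONE_NORMAL: %lu pages (~%luM)\n",
               reserve, reserve >> 8);
        return 0;
}
#endif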

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        printk(KERN_EMERG "Bad page state in process '%s'\n"
                KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
                KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
                KERN_EMERG "Backtrace:\n",
                current->comm, page, (int)(2*sizeof(unsigned long)),
                (unsigned long)page->flags, page->mapping,
                page_mapcount(page), page_count(page));
        dump_stack();
        page->flags &= ~(1 << PG_lru    |
                        1 << PG_private |
                        1 << PG_locked  |
                        1 << PG_active  |
                        1 << PG_dirty   |
                        1 << PG_reclaim |
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
                        1 << PG_buddy );
        set_page_count(page, 0);
        reset_page_mapcount(page);
        page->mapping = NULL;
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                __SetPageTail(p);
                p->first_page = page;
        }
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        if (unlikely(compound_order(page) != order))
                bad_page(page);

        if (unlikely(!PageHead(page)))
                bad_page(page);
        __ClearPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) |
                                (p->first_page != page)))
                        bad_page(page);
                __ClearPageTail(p);
        }
}
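
/*
 * A minimal sketch of the invariants documented above (illustrative
 * only; demo_compound_invariants is a hypothetical helper, not part
 * of this file). After prep_compound_page() runs for an order-2
 * allocation, the head carries the order and every tail points back
 * at the head.
 */
#if 0
static void demo_compound_invariants(void)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
        int i;

        if (!page)
                return;

        BUG_ON(!PageHead(page));
        BUG_ON(compound_order(page) != 2);
        for (i = 1; i < (1 << 2); i++) {
                BUG_ON(!PageTail(page + i));
                BUG_ON((page + i)->first_page != page);
        }
        __free_pages(page, 2);  /* destroy_compound_page() runs on free */
}
#endif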

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined (order O+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
        unsigned long buddy_idx = page_idx ^ (1 << order);

        return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
        return (page_idx & ~(1 << order));
}
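
/*
 * The two identities above in isolation (a standalone userspace
 * sketch; main() and the printfs are illustrative, not kernel code).
 * Reproduces the example from the comment: page #8 at order 1 has
 * buddy #10, and together they combine into the order-2 block at #8.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long page_idx = 8;
        unsigned int order = 1;
        unsigned long buddy_idx = page_idx ^ (1UL << order);    /* B2 = B1 ^ (1 << O) */
        unsigned long combined = page_idx & ~(1UL << order);    /* P = B & ~(1 << O) */

        printf("buddy of #%lu at order %u: #%lu\n", page_idx, order, buddy_idx);
        printf("combined order-%u block starts at #%lu\n", order + 1, combined);
        return 0;
}
#endif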

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. Page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order)
{
        unsigned long page_idx;
        int order_size = 1 << order;
        int migratetype = get_pageblock_migratetype(page);

        if (unlikely(PageCompound(page)))
                destroy_compound_page(page, order);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & (order_size - 1));
        VM_BUG_ON(bad_range(zone, page));

        __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;

                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;          /* Move the buddy up one level. */

                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
                combined_idx = __find_combined_index(page_idx, order);
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);
        list_add(&page->lru,
                &zone->free_area[order].free_list[migratetype]);
        zone->free_area[order].nr_free++;
}
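
/*
 * A toy userspace walk of the coalescing loop above (illustrative
 * sketch; the is_free[] model and the demo names are hypothetical).
 * Page #9 is already free at order 0 and #10 at order 1; freeing #8
 * then merges up to a single order-2 block at #8, just as
 * __free_one_page() would.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

#define DEMO_MAX_ORDER 4
static bool is_free[DEMO_MAX_ORDER][16];        /* [order][index] */

static void demo_free_one_page(unsigned long idx, unsigned int order)
{
        while (order < DEMO_MAX_ORDER - 1) {
                unsigned long buddy = idx ^ (1UL << order);

                if (!is_free[order][buddy])
                        break;
                is_free[order][buddy] = false;  /* list_del() + rmv_page_order() */
                idx &= ~(1UL << order);         /* __find_combined_index() */
                order++;
        }
        is_free[order][idx] = true;             /* set_page_order() + list_add() */
        printf("final free block: index %lu, order %u\n", idx, order);
}

int main(void)
{
        is_free[0][9] = true;
        is_free[1][10] = true;
        demo_free_one_page(8, 0);       /* prints: index 8, order 2 */
        return 0;
}
#endif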

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
                        1 << PG_private |
                        1 << PG_locked  |
                        1 << PG_active  |
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
                        1 << PG_reserved |
                        1 << PG_buddy ))))
                bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
        /*
         * For now, we report if PG_reserved was found set, but do not
         * clear it, and do not free the page.  But we shall soon need
         * to do more, for when the ZERO_PAGE count wraps negative.
         */
        return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
                                        struct list_head *list, int order)
{
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
        while (count--) {
                struct page *page;

                VM_BUG_ON(list_empty(list));
                page = list_entry(list->prev, struct page, lru);
                /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
                __free_one_page(page, zone, order);
        }
        spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
        __free_one_page(page, zone, order);
        spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int i;
        int reserved = 0;

        for (i = 0; i < (1 << order); ++i)
                reserved += free_pages_check(page + i);
        if (reserved)
                return;

        if (!PageHighMem(page))
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order);
        local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
        if (order == 0) {
                __ClearPageReserved(page);
                set_page_count(page, 0);
                set_page_refcounted(page);
                __free_page(page);
        } else {
                int loop;

                prefetchw(page);
                for (loop = 0; loop < BITS_PER_LONG; loop++) {
                        struct page *p = &page[loop];

                        if (loop + 1 < BITS_PER_LONG)
                                prefetchw(p + 1);
                        __ClearPageReserved(p);
                        set_page_count(p, 0);
                }

                set_page_refcounted(page);
                __free_pages(page, order);
        }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
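
/*
 * What expand() does for one concrete case (standalone userspace
 * sketch; the printf trace is illustrative, not kernel code).
 * Splitting an order-3 block at index 0 to satisfy an order-0 request
 * frees the upper halves at orders 2, 1 and 0, i.e. indices 4, 2
 * and 1.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned int low = 0, high = 3;
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                /* &page[size] goes back on free_list[high] */
                printf("freed remainder: index %lu, order %u\n", size, high);
        }
        printf("allocated: index 0, order %u\n", low);
        return 0;
}
#endif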

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (page_count(page) != 0)  |
                (page->flags & (
                        1 << PG_lru     |
                        1 << PG_private |
                        1 << PG_locked  |
                        1 << PG_active  |
                        1 << PG_dirty   |
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
                        1 << PG_reserved |
                        1 << PG_buddy ))))
                bad_page(page);

        /*
         * For now, we report if PG_reserved was found set, but do not
         * clear it, and do not allocate the page: as a safety net.
         */
        if (PageReserved(page))
                return 1;

        page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
                        1 << PG_referenced | 1 << PG_arch_1 |
                        1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}

/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migratetype are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
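
/*
 * Reading the table (a standalone userspace sketch; the local enum
 * mirrors the MIGRATE_* order in include/linux/mmzone.h and is
 * repeated here only so the example is self-contained). An unmovable
 * request that finds its own list empty tries RECLAIMABLE, then
 * MOVABLE, then RESERVE.
 */
#if 0
#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE, TYPES };

static const int demo_fallbacks[TYPES][TYPES - 1] = {
        [UNMOVABLE]   = { RECLAIMABLE, MOVABLE,   RESERVE },
        [RECLAIMABLE] = { UNMOVABLE,   MOVABLE,   RESERVE },
        [MOVABLE]     = { RECLAIMABLE, UNMOVABLE, RESERVE },
        [RESERVE]     = { RESERVE,     RESERVE,   RESERVE },
};

int main(void)
{
        int i;

        for (i = 0; i < TYPES - 1; i++)
                printf("UNMOVABLE fallback %d -> type %d\n",
                       i, demo_fallbacks[UNMOVABLE][i]);
        return 0;
}
#endif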

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
                        struct page *start_page, struct page *end_page,
                        int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_del(&page->lru);
                list_add(&page->lru,
                        &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}
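
/*
 * The alignment step above in isolation (userspace sketch; a
 * pageblock of 512 pages, i.e. pageblock_order 9, is assumed for the
 * numbers). pfn 1000 belongs to the pageblock starting at pfn 512 and
 * ending at pfn 1023.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long pageblock_nr_pages = 1UL << 9;    /* assumed order-9 blocks */
        unsigned long pfn = 1000;
        unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);
        unsigned long end_pfn = start_pfn + pageblock_nr_pages - 1;

        printf("pfn %lu -> pageblock [%lu, %lu]\n", pfn, start_pfn, end_pfn);
        return 0;
}
#endif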

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
                                                int start_migratetype)
{
        struct free_area *area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                continue;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
                                        start_migratetype == MIGRATE_RECLAIMABLE) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)))
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
                        __mod_zone_page_state(zone, NR_FREE_PAGES,
                                                        -(1UL << order));

                        if (current_order == pageblock_order)
                                set_pageblock_migratetype(page,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area, migratetype);
                        return page;
                }
        }

        /* Use MIGRATE_RESERVE rather than fail an allocation */
        return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page))
                page = __rmqueue_fallback(zone, order, migratetype);

        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype)
{
        int i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the
                 * caller's perspective, the linked list is ordered by page
                 * number under some conditions. This is useful for IO
                 * devices that can merge IO requests if the physical pages
                 * are ordered properly.
                 */
                list_add(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
        spin_unlock(&zone->lock);
        return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pages_bulk(zone, to_drain, &pcp->list, 0);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif

static void __drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;
        int i;

        for_each_zone(zone) {
                struct per_cpu_pageset *pset;

                if (!populated_zone(zone))
                        continue;

                pset = zone_pcp(zone, cpu);
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;

                        pcp = &pset->pcp[i];
                        local_irq_save(flags);
                        free_pages_bulk(zone, pcp->count, &pcp->list, 0);
                        pcp->count = 0;
                        local_irq_restore(flags);
                }
        }
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __drain_pages(smp_processor_id());
        local_irq_restore(flags);
}

void smp_drain_local_pages(void *arg)
{
        drain_local_pages();
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_local_pages(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __drain_pages(smp_processor_id());
        local_irq_restore(flags);

        smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;

        if (PageAnon(page))
                page->mapping = NULL;
        if (free_pages_check(page))
                return;

        if (!PageHighMem(page))
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);

        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
        local_irq_save(flags);
        __count_vm_event(PGFREE);
        list_add(&page->lru, &pcp->list);
        set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
                pcp->count -= pcp->batch;
        }
        local_irq_restore(flags);
        put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
        free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
        free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) 0-order sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
        int i;

        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
}
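
/*
 * A plausible caller, per the comment above (illustrative sketch; the
 * function name demo_split_and_free is hypothetical). A non-compound
 * order-2 allocation is split so each of the four sub-pages can later
 * be freed on its own.
 */
#if 0
static void demo_split_and_free(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2); /* no __GFP_COMP */
        int i;

        if (!page)
                return;

        split_page(page, 2);
        for (i = 0; i < 4; i++)
                __free_page(page + i);  /* each sub-page freed individually */
}
#endif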

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
                        struct zone *zone, int order, gfp_t gfp_flags)
{
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
        int cpu;
        int migratetype = allocflags_to_migratetype(gfp_flags);

again:
        cpu  = get_cpu();
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;

                pcp = &zone_pcp(zone, cpu)->pcp[cold];
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
                                        pcp->batch, &pcp->list, migratetype);
                        if (unlikely(!pcp->count))
                                goto failed;
                }

                /* Find a page of the appropriate migrate type */
                list_for_each_entry(page, &pcp->list, lru)
                        if (page_private(page) == migratetype)
                                break;

                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, &pcp->list, migratetype);
                        page = list_entry(pcp->list.next, struct page, lru);
                }

                list_del(&page->lru);
                pcp->count--;
        } else {
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
        }

        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(zonelist, zone);
        local_irq_restore(flags);
        put_cpu();

        VM_BUG_ON(bad_range(zone, page));
        if (prep_new_page(page, order, gfp_flags))
                goto again;
        return page;

failed:
        local_irq_restore(flags);
        put_cpu();
        return NULL;
}

#define ALLOC_NO_WATERMARKS     0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN         0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW         0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH        0x08 /* use pages_high watermark */
#define ALLOC_HARDER            0x10 /* try to alloc harder */
#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40 /* check for correct cpuset */

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
        struct fault_attr attr;

        u32 ignore_gfp_highmem;
        u32 ignore_gfp_wait;
        u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

        struct dentry *ignore_gfp_highmem_file;
        struct dentry *ignore_gfp_wait_file;
        struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_gfp_wait = 1,
        .ignore_gfp_highmem = 1,
        .min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
        return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        if (order < fail_page_alloc.min_order)
                return 0;
        if (gfp_mask & __GFP_NOFAIL)
                return 0;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return 0;
        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
                return 0;

        return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;
        int err;

        err = init_fault_attr_dentries(&fail_page_alloc.attr,
                                       "fail_page_alloc");
        if (err)
                return err;
        dir = fail_page_alloc.attr.dentries.dir;

        fail_page_alloc.ignore_gfp_wait_file =
                debugfs_create_bool("ignore-gfp-wait", mode, dir,
                                      &fail_page_alloc.ignore_gfp_wait);

        fail_page_alloc.ignore_gfp_highmem_file =
                debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                      &fail_page_alloc.ignore_gfp_highmem);
        fail_page_alloc.min_order_file =
                debugfs_create_u32("min-order", mode, dir,
                                   &fail_page_alloc.min_order);

        if (!fail_page_alloc.ignore_gfp_wait_file ||
            !fail_page_alloc.ignore_gfp_highmem_file ||
            !fail_page_alloc.min_order_file) {
                err = -ENOMEM;
                debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
                debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
                debugfs_remove(fail_page_alloc.min_order_file);
                cleanup_fault_attr_dentries(&fail_page_alloc.attr);
        }

        return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
        return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
{
        /* free_pages may go negative - that's OK */
        long min = mark;
        long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
        int o;

        if (alloc_flags & ALLOC_HIGH)
                min -= min / 2;
        if (alloc_flags & ALLOC_HARDER)
                min -= min / 4;

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
                return 0;
        for (o = 0; o < order; o++) {
                /* At the next order, this order's pages become unavailable */
                free_pages -= z->free_area[o].nr_free << o;

                /* Require fewer higher order pages to be free */
                min >>= 1;

                if (free_pages <= min)
                        return 0;
        }
        return 1;
}
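
/*
 * The arithmetic above on concrete numbers (standalone userspace
 * sketch; the free-area counts in nr_free[] are made up for
 * illustration). For an order-2 request, pages locked up in orders 0
 * and 1 are subtracted and the minimum is halved at each step.
 */
#if 0
#include <stdio.h>

int main(void)
{
        long free_pages = 1000 - (1 << 2) + 1;  /* NR_FREE_PAGES adjustment */
        long min = 128;                         /* the watermark ('mark') */
        long lowmem_reserve = 0;
        unsigned long nr_free[2] = { 300, 120 };        /* per-order free blocks */
        int o;

        if (free_pages <= min + lowmem_reserve) {
                printf("fails base check\n");
                return 0;
        }
        for (o = 0; o < 2; o++) {
                free_pages -= nr_free[o] << o;  /* unusable at higher order */
                min >>= 1;
                if (free_pages <= min) {
                        printf("fails at order %d\n", o + 1);
                        return 0;
                }
        }
        printf("order-2 watermark ok (free=%ld, min=%ld)\n", free_pages, min);
        return 0;
}
#endif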
1246
1247#ifdef CONFIG_NUMA
1248/*
1249 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1250 * skip over zones that are not allowed by the cpuset, or that have
1251 * been recently (in last second) found to be nearly full.  See further
1252 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1253 * that have to skip over a lot of full or unallowed zones.
1254 *
1255 * If the zonelist cache is present in the passed in zonelist, then
1256 * returns a pointer to the allowed node mask (either the current
1257 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1258 *
1259 * If the zonelist cache is not available for this zonelist, does
1260 * nothing and returns NULL.
1261 *
1262 * If the fullzones BITMAP in the zonelist cache is stale (more than
1263 * a second since last zap'd) then we zap it out (clear its bits.)
1264 *
1265 * We hold off even calling zlc_setup, until after we've checked the
1266 * first zone in the zonelist, on the theory that most allocations will
1267 * be satisfied from that first zone, so best to examine that zone as
1268 * quickly as we can.
1269 */
1270static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1271{
1272        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1273        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1274
1275        zlc = zonelist->zlcache_ptr;
1276        if (!zlc)
1277                return NULL;
1278
1279        if (jiffies - zlc->last_full_zap > 1 * HZ) {
1280                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1281                zlc->last_full_zap = jiffies;
1282        }
1283
1284        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1285                                        &cpuset_current_mems_allowed :
1286                                        &node_states[N_HIGH_MEMORY];
1287        return allowednodes;
1288}
1289
1290/*
1291 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1292 * if it is worth looking at further for free memory:
1293 *  1) Check that the zone isn't thought to be full (doesn't have its
1294 *     bit set in the zonelist_cache fullzones BITMAP).
1295 *  2) Check that the zones node (obtained from the zonelist_cache
1296 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1297 * Return true (non-zero) if zone is worth looking at further, or
1298 * else return false (zero) if it is not.
1299 *
1300 * This check -ignores- the distinction between various watermarks,
1301 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1302 * found to be full for any variation of these watermarks, it will
1303 * be considered full for up to one second by all requests, unless
1304 * we are so low on memory on all allowed nodes that we are forced
1305 * into the second scan of the zonelist.
1306 *
1307 * In the second scan we ignore this zonelist cache and exactly
1308 * apply the watermarks to all zones, even it is slower to do so.
1309 * We are low on memory in the second scan, and should leave no stone
1310 * unturned looking for a free page.
1311 */
1312static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1313                                                nodemask_t *allowednodes)
1314{
1315        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1316        int i;                          /* index of *z in zonelist zones */
1317        int n;                          /* node that zone *z is on */
1318
1319        zlc = zonelist->zlcache_ptr;
1320        if (!zlc)
1321                return 1;
1322
1323        i = z - zonelist->zones;
1324        n = zlc->z_to_n[i];
1325
1326        /* This zone is worth trying if it is allowed but not full */
1327        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1328}
1329
1330/*
1331 * Given 'z' scanning a zonelist, set the corresponding bit in
1332 * zlc->fullzones, so that subsequent attempts to allocate a page
1333 * from that zone don't waste time re-examining it.
1334 */
1335static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1336{
1337        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1338        int i;                          /* index of *z in zonelist zones */
1339
1340        zlc = zonelist->zlcache_ptr;
1341        if (!zlc)
1342                return;
1343
1344        i = z - zonelist->zones;
1345
1346        set_bit(i, zlc->fullzones);
1347}
1348
1349#else   /* CONFIG_NUMA */
1350
1351static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1352{
1353        return NULL;
1354}
1355
1356static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
1357                                nodemask_t *allowednodes)
1358{
1359        return 1;
1360}
1361
1362static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
1363{
1364}
1365#endif  /* CONFIG_NUMA */
1366
1367/*
1368 * get_page_from_freelist goes through the zonelist trying to allocate
1369 * a page.
1370 */
1371static struct page *
1372get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
1373                struct zonelist *zonelist, int alloc_flags)
1374{
1375        struct zone **z;
1376        struct page *page = NULL;
1377        int classzone_idx = zone_idx(zonelist->zones[0]);
1378        struct zone *zone;
1379        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1380        int zlc_active = 0;             /* set if using zonelist_cache */
1381        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1382        enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
1383
1384zonelist_scan:
1385        /*
1386         * Scan zonelist, looking for a zone with enough free.
1387         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1388         */
1389        z = zonelist->zones;
1390
1391        do {
1392                /*
1393                 * In NUMA, this could be a policy zonelist which contains
1394                 * zones that may not be allowed by the current gfp_mask.
1395                 * Check the zone is allowed by the current flags
1396                 */
1397                if (unlikely(alloc_should_filter_zonelist(zonelist))) {
1398                        if (highest_zoneidx == -1)
1399                                highest_zoneidx = gfp_zone(gfp_mask);
1400                        if (zone_idx(*z) > highest_zoneidx)
1401                                continue;
1402                }
1403
1404                if (NUMA_BUILD && zlc_active &&
1405                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1406                                continue;
1407                zone = *z;
1408                if ((alloc_flags & ALLOC_CPUSET) &&
1409                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1410                                goto try_next_zone;
1411
1412                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1413                        unsigned long mark;
1414                        if (alloc_flags & ALLOC_WMARK_MIN)
1415                                mark = zone->pages_min;
1416                        else if (alloc_flags & ALLOC_WMARK_LOW)
1417                                mark = zone->pages_low;
1418                        else
1419                                mark = zone->pages_high;
1420                        if (!zone_watermark_ok(zone, order, mark,
1421                                    classzone_idx, alloc_flags)) {
1422                                if (!zone_reclaim_mode ||
1423                                    !zone_reclaim(zone, gfp_mask, order))
1424                                        goto this_zone_full;
1425                        }
1426                }
1427
1428                page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
1429                if (page)
1430                        break;
1431this_zone_full:
1432                if (NUMA_BUILD)
1433                        zlc_mark_zone_full(zonelist, z);
1434try_next_zone:
1435                if (NUMA_BUILD && !did_zlc_setup) {
1436                        /* we do zlc_setup after the first zone is tried */
1437                        allowednodes = zlc_setup(zonelist, alloc_flags);
1438                        zlc_active = 1;
1439                        did_zlc_setup = 1;
1440                }
1441        } while (*(++z) != NULL);
1442
1443        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1444                /* Disable zlc cache for second zonelist scan */
1445                zlc_active = 0;
1446                goto zonelist_scan;
1447        }
1448        return page;
1449}
1450
1451/*
1452 * This is the 'heart' of the zoned buddy allocator.
1453 */
1454struct page * fastcall
1455__alloc_pages(gfp_t gfp_mask, unsigned int order,
1456                struct zonelist *zonelist)
1457{
1458        const gfp_t wait = gfp_mask & __GFP_WAIT;
1459        struct zone **z;
1460        struct page *page;
1461        struct reclaim_state reclaim_state;
1462        struct task_struct *p = current;
1463        int do_retry;
1464        int alloc_flags;
1465        int did_some_progress;
1466
1467        might_sleep_if(wait);
1468
1469        if (should_fail_alloc_page(gfp_mask, order))
1470                return NULL;
1471
1472restart:
1473        z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
1474
1475        if (unlikely(*z == NULL)) {
1476                /*
1477                 * Happens if we have an empty zonelist as a result of
1478                 * GFP_THISNODE being used on a memoryless node
1479                 */
1480                return NULL;
1481        }
1482
1483        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
1484                                zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1485        if (page)
1486                goto got_pg;
1487
1488        /*
1489         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1490         * __GFP_NOWARN set) should not cause reclaim since the subsystem
1491         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
1492         * using a larger set of nodes after it has established that the
1493         * allowed per node queues are empty and that nodes are
1494         * over allocated.
1495         */
1496        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1497                goto nopage;
1498
1499        for (z = zonelist->zones; *z; z++)
1500                wakeup_kswapd(*z, order);
1501
1502        /*
1503         * OK, we're below the kswapd watermark and have kicked background
1504         * reclaim. Now things get more complex, so set up alloc_flags according
1505         * to how we want to proceed.
1506         *
1507         * The caller may dip into page reserves a bit more if the caller
1508         * cannot run direct reclaim, or if the caller has realtime scheduling
1509         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1510         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1511         */
1512        alloc_flags = ALLOC_WMARK_MIN;
1513        if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1514                alloc_flags |= ALLOC_HARDER;
1515        if (gfp_mask & __GFP_HIGH)
1516                alloc_flags |= ALLOC_HIGH;
1517        if (wait)
1518                alloc_flags |= ALLOC_CPUSET;
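
        /*
         * For illustration: GFP_ATOMIC is __GFP_HIGH without __GFP_WAIT, so
         * an atomic caller reaches here with wait == 0 and ends up with
         * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH and without
         * ALLOC_CPUSET, i.e. it may allocate outside its cpuset and dip
         * further into the reserves than an ordinary GFP_KERNEL caller.
         */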
1519
1520        /*
1521         * Go through the zonelist again. Let __GFP_HIGH and allocations
1522         * coming from realtime tasks go deeper into reserves.
1523         *
1524         * This is the last chance, in general, before the goto nopage.
1525         * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1526         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1527         */
1528        page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
1529        if (page)
1530                goto got_pg;
1531
1532        /* This allocation should allow future memory freeing. */
1533
1534rebalance:
1535        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1536                        && !in_interrupt()) {
1537                if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1538nofail_alloc:
1539                        /* go through the zonelist yet again, ignoring mins */
1540                        page = get_page_from_freelist(gfp_mask, order,
1541                                zonelist, ALLOC_NO_WATERMARKS);
1542                        if (page)
1543                                goto got_pg;
1544                        if (gfp_mask & __GFP_NOFAIL) {
1545                                congestion_wait(WRITE, HZ/50);
1546                                goto nofail_alloc;
1547                        }
1548                }
1549                goto nopage;
1550        }
1551
1552        /* Atomic allocations - we can't balance anything */
1553        if (!wait)
1554                goto nopage;
1555
1556        cond_resched();
1557
1558        /* We now go into synchronous reclaim */
1559        cpuset_memory_pressure_bump();
1560        p->flags |= PF_MEMALLOC;
1561        reclaim_state.reclaimed_slab = 0;
1562        p->reclaim_state = &reclaim_state;
1563
1564        did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
1565
1566        p->reclaim_state = NULL;
1567        p->flags &= ~PF_MEMALLOC;
1568
1569        cond_resched();
1570
1571        if (order != 0)
1572                drain_all_local_pages();
1573
1574        if (likely(did_some_progress)) {
1575                page = get_page_from_freelist(gfp_mask, order,
1576                                                zonelist, alloc_flags);
1577                if (page)
1578                        goto got_pg;
1579        } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1580                if (!try_set_zone_oom(zonelist)) {
1581                        schedule_timeout_uninterruptible(1);
1582                        goto restart;
1583                }
1584
1585                /*
1586         * Go through the zonelist yet one more time, keeping a
1587         * very high watermark here; this is only to catch a
1588         * parallel OOM killing, and we must fail if we're still
1589                 * under heavy pressure.
1590                 */
1591                page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
1592                                zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1593                if (page) {
1594                        clear_zonelist_oom(zonelist);
1595                        goto got_pg;
1596                }
1597
1598                /* The OOM killer will not help higher order allocs so fail */
1599                if (order > PAGE_ALLOC_COSTLY_ORDER) {
1600                        clear_zonelist_oom(zonelist);
1601                        goto nopage;
1602                }
1603
1604                out_of_memory(zonelist, gfp_mask, order);
1605                clear_zonelist_oom(zonelist);
1606                goto restart;
1607        }
1608
1609        /*
1610         * Don't let big-order allocations loop unless the caller explicitly
1611         * requests that.  Wait for some write requests to complete then retry.
1612         *
1613         * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
1614         * <= PAGE_ALLOC_COSTLY_ORDER, but that may not be true elsewhere.
1615         */
1616        do_retry = 0;
1617        if (!(gfp_mask & __GFP_NORETRY)) {
1618                if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1619                                                (gfp_mask & __GFP_REPEAT))
1620                        do_retry = 1;
1621                if (gfp_mask & __GFP_NOFAIL)
1622                        do_retry = 1;
1623        }
1624        if (do_retry) {
1625                congestion_wait(WRITE, HZ/50);
1626                goto rebalance;
1627        }
1628
1629nopage:
1630        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1631                printk(KERN_WARNING "%s: page allocation failure."
1632                        " order:%d, mode:0x%x\n",
1633                        p->comm, order, gfp_mask);
1634                dump_stack();
1635                show_mem();
1636        }
1637got_pg:
1638        return page;
1639}
1640
1641EXPORT_SYMBOL(__alloc_pages);
1642
1643/*
1644 * Common helper functions.
1645 */
1646fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1647{
1648        struct page * page;
1649        page = alloc_pages(gfp_mask, order);
1650        if (!page)
1651                return 0;
1652        return (unsigned long) page_address(page);
1653}
1654
1655EXPORT_SYMBOL(__get_free_pages);
1656
1657fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1658{
1659        struct page * page;
1660
1661        /*
1662         * get_zeroed_page() returns a directly mapped kernel virtual
1663         * address, which cannot represent a highmem page.
1664         */
1665        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1666
1667        page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1668        if (page)
1669                return (unsigned long) page_address(page);
1670        return 0;
1671}
1672
1673EXPORT_SYMBOL(get_zeroed_page);
1674
1675void __pagevec_free(struct pagevec *pvec)
1676{
1677        int i = pagevec_count(pvec);
1678
1679        while (--i >= 0)
1680                free_hot_cold_page(pvec->pages[i], pvec->cold);
1681}
1682
1683fastcall void __free_pages(struct page *page, unsigned int order)
1684{
1685        if (put_page_testzero(page)) {
1686                if (order == 0)
1687                        free_hot_page(page);
1688                else
1689                        __free_pages_ok(page, order);
1690        }
1691}
1692
1693EXPORT_SYMBOL(__free_pages);
1694
1695fastcall void free_pages(unsigned long addr, unsigned int order)
1696{
1697        if (addr != 0) {
1698                VM_BUG_ON(!virt_addr_valid((void *)addr));
1699                __free_pages(virt_to_page((void *)addr), order);
1700        }
1701}
1702
1703EXPORT_SYMBOL(free_pages);
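
/*
 * A short usage sketch for the helpers above (illustrative, not taken from
 * the surrounding code): callers that need the struct page use alloc_pages()
 * and __free_pages(); callers that only need a kernel virtual address use
 * __get_free_pages() and must release with free_pages():
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr) {
 *		... use the two pages (8KB with 4KB pages) at addr ...
 *		free_pages(addr, 1);
 *	}
 *
 * The order passed when freeing must match the order used when allocating;
 * the buddy allocator does not remember it.
 */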
1704
1705static unsigned int nr_free_zone_pages(int offset)
1706{
1707        /* Just pick one node, since the fallback list is circular */
1708        pg_data_t *pgdat = NODE_DATA(numa_node_id());
1709        unsigned int sum = 0;
1710
1711        struct zonelist *zonelist = pgdat->node_zonelists + offset;
1712        struct zone **zonep = zonelist->zones;
1713        struct zone *zone;
1714
1715        for (zone = *zonep++; zone; zone = *zonep++) {
1716                unsigned long size = zone->present_pages;
1717                unsigned long high = zone->pages_high;
1718                if (size > high)
1719                        sum += size - high;
1720        }
1721
1722        return sum;
1723}
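
/*
 * Worked example with illustrative numbers: a zonelist containing a single
 * zone with present_pages = 262144 (1GB of 4KB pages) and pages_high = 3072
 * makes nr_free_zone_pages() report 262144 - 3072 = 259072 pages, i.e. the
 * amount nominally allocatable before kswapd would need to push the zone
 * back above its high watermark.
 */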
1724
1725/*
1726 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1727 */
1728unsigned int nr_free_buffer_pages(void)
1729{
1730        return nr_free_zone_pages(gfp_zone(GFP_USER));
1731}
1732EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1733
1734/*
1735 * Amount of free RAM allocatable within all zones
1736 */
1737unsigned int nr_free_pagecache_pages(void)
1738{
1739        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1740}
1741
1742static inline void show_node(struct zone *zone)
1743{
1744        if (NUMA_BUILD)
1745                printk("Node %d ", zone_to_nid(zone));
1746}
1747
1748void si_meminfo(struct sysinfo *val)
1749{
1750        val->totalram = totalram_pages;
1751        val->sharedram = 0;
1752        val->freeram = global_page_state(NR_FREE_PAGES);
1753        val->bufferram = nr_blockdev_pages();
1754        val->totalhigh = totalhigh_pages;
1755        val->freehigh = nr_free_highpages();
1756        val->mem_unit = PAGE_SIZE;
1757}
1758
1759EXPORT_SYMBOL(si_meminfo);
1760
1761#ifdef CONFIG_NUMA
1762void si_meminfo_node(struct sysinfo *val, int nid)
1763{
1764        pg_data_t *pgdat = NODE_DATA(nid);
1765
1766        val->totalram = pgdat->node_present_pages;
1767        val->freeram = node_page_state(nid, NR_FREE_PAGES);
1768#ifdef CONFIG_HIGHMEM
1769        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1770        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1771                        NR_FREE_PAGES);
1772#else
1773        val->totalhigh = 0;
1774        val->freehigh = 0;
1775#endif
1776        val->mem_unit = PAGE_SIZE;
1777}
1778#endif
1779
1780#define K(x) ((x) << (PAGE_SHIFT-10))
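
/*
 * K() converts a page count to kilobytes; with 4KB pages (PAGE_SHIFT == 12)
 * it is x << 2, i.e. x * 4.
 */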
1781
1782/*
1783 * Show the free area list (used inside the shift+scroll-lock SysRq path).
1784 * We also print, for each zone, how much memory sits on each free list,
1785 * which gives a rough indication of how fragmented free memory is.
1786 */
1787void show_free_areas(void)
1788{
1789        int cpu;
1790        struct zone *zone;
1791
1792        for_each_zone(zone) {
1793                if (!populated_zone(zone))
1794                        continue;
1795
1796                show_node(zone);
1797                printk("%s per-cpu:\n", zone->name);
1798
1799                for_each_online_cpu(cpu) {
1800                        struct per_cpu_pageset *pageset;
1801
1802                        pageset = zone_pcp(zone, cpu);
1803
1804                        printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
1805                               "Cold: hi:%5d, btch:%4d usd:%4d\n",
1806                               cpu, pageset->pcp[0].high,
1807                               pageset->pcp[0].batch, pageset->pcp[0].count,
1808                               pageset->pcp[1].high, pageset->pcp[1].batch,
1809                               pageset->pcp[1].count);
1810                }
1811        }
1812
1813        printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
1814                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1815                global_page_state(NR_ACTIVE),
1816                global_page_state(NR_INACTIVE),
1817                global_page_state(NR_FILE_DIRTY),
1818                global_page_state(NR_WRITEBACK),
1819                global_page_state(NR_UNSTABLE_NFS),
1820                global_page_state(NR_FREE_PAGES),
1821                global_page_state(NR_SLAB_RECLAIMABLE) +
1822                        global_page_state(NR_SLAB_UNRECLAIMABLE),
1823                global_page_state(NR_FILE_MAPPED),
1824                global_page_state(NR_PAGETABLE),
1825                global_page_state(NR_BOUNCE));
1826
1827        for_each_zone(zone) {
1828                int i;
1829
1830                if (!populated_zone(zone))
1831                        continue;
1832
1833                show_node(zone);
1834                printk("%s"
1835                        " free:%lukB"
1836                        " min:%lukB"
1837                        " low:%lukB"
1838                        " high:%lukB"
1839                        " active:%lukB"
1840                        " inactive:%lukB"
1841                        " present:%lukB"
1842                        " pages_scanned:%lu"
1843                        " all_unreclaimable? %s"
1844                        "\n",
1845                        zone->name,
1846                        K(zone_page_state(zone, NR_FREE_PAGES)),
1847                        K(zone->pages_min),
1848                        K(zone->pages_low),
1849                        K(zone->pages_high),
1850                        K(zone_page_state(zone, NR_ACTIVE)),
1851                        K(zone_page_state(zone, NR_INACTIVE)),
1852                        K(zone->present_pages),
1853                        zone->pages_scanned,
1854                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1855                        );
1856                printk("lowmem_reserve[]:");
1857                for (i = 0; i < MAX_NR_ZONES; i++)
1858                        printk(" %lu", zone->lowmem_reserve[i]);
1859                printk("\n");
1860        }
1861
1862        for_each_zone(zone) {
1863                unsigned long nr[MAX_ORDER], flags, order, total = 0;
1864
1865                if (!populated_zone(zone))
1866                        continue;
1867
1868                show_node(zone);
1869                printk("%s: ", zone->name);
1870
1871                spin_lock_irqsave(&zone->lock, flags);
1872                for (order = 0; order < MAX_ORDER; order++) {
1873                        nr[order] = zone->free_area[order].nr_free;
1874                        total += nr[order] << order;
1875                }
1876                spin_unlock_irqrestore(&zone->lock, flags);
1877                for (order = 0; order < MAX_ORDER; order++)
1878                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
1879                printk("= %lukB\n", K(total));
1880        }
1881
1882        show_swap_cache_info();
1883}
1884
1885/*
1886 * Builds allocation fallback zone lists.
1887 *
1888 * Add all populated zones of a node to the zonelist.
1889 */
1890static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1891                                int nr_zones, enum zone_type zone_type)
1892{
1893        struct zone *zone;
1894
1895        BUG_ON(zone_type >= MAX_NR_ZONES);
1896        zone_type++;
1897
1898        do {
1899                zone_type--;
1900                zone = pgdat->node_zones + zone_type;
1901                if (populated_zone(zone)) {
1902                        zonelist->zones[nr_zones++] = zone;
1903                        check_highest_zone(zone_type);
1904                }
1905
1906        } while (zone_type);
1907        return nr_zones;
1908}
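
/*
 * Worked example (illustrative): called with zone_type = ZONE_HIGHMEM on a
 * node whose DMA, Normal and HighMem zones are all populated, the loop above
 * appends them highest first, yielding { HighMem, Normal, DMA }, so fallback
 * reaches the scarcer low zones only after the preferred zone is exhausted.
 */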
1909
1910
1911/*
1912 *  zonelist_order:
1913 *  0 = automatic detection of better ordering.
1914 *  1 = order by ([node] distance, -zonetype)
1915 *  2 = order by (-zonetype, [node] distance)
1916 *
1917 *  On non-NUMA configurations, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE
1918 *  create the same zonelist, so only NUMA can configure this parameter.
1919 */
1920#define ZONELIST_ORDER_DEFAULT  0
1921#define ZONELIST_ORDER_NODE     1
1922#define ZONELIST_ORDER_ZONE     2
1923
1924/* zonelist order in the kernel.
1925 * set_zonelist_order() will set this to NODE or ZONE.
1926 */
1927static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
1928static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
1929
1930
1931#ifdef CONFIG_NUMA
1932/* The value the user specified, possibly changed by boot-time configuration */
1933static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1934/* string for sysctl */
1935#define NUMA_ZONELIST_ORDER_LEN 16
1936char numa_zonelist_order[16] = "default";
1937
1938/*
1939 * Interface for configuring the zonelist ordering.
1940 * Command line option "numa_zonelist_order":
1941 *      "[dD]efault"    - default, automatic configuration
1942 *      "[nN]ode"       - order by node locality, then by zone within node
1943 *      "[zZ]one"       - order by zone, then by node locality within zone
1944 */
1945
1946static int __parse_numa_zonelist_order(char *s)
1947{
1948        if (*s == 'd' || *s == 'D') {
1949                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
1950        } else if (*s == 'n' || *s == 'N') {
1951                user_zonelist_order = ZONELIST_ORDER_NODE;
1952        } else if (*s == 'z' || *s == 'Z') {
1953                user_zonelist_order = ZONELIST_ORDER_ZONE;
1954        } else {
1955                printk(KERN_WARNING
1956                        "Ignoring invalid numa_zonelist_order value:  "
1957                        "%s\n", s);
1958                return -EINVAL;
1959        }
1960        return 0;
1961}
1962
1963static __init int setup_numa_zonelist_order(char *s)
1964{
1965        if (s)
1966                return __parse_numa_zonelist_order(s);
1967        return 0;
1968}
1969early_param("numa_zonelist_order", setup_numa_zonelist_order);
1970
1971/*
1972 * sysctl handler for numa_zonelist_order
1973 */
1974int numa_zonelist_order_handler(ctl_table *table, int write,
1975                struct file *file, void __user *buffer, size_t *length,
1976                loff_t *ppos)
1977{
1978        char saved_string[NUMA_ZONELIST_ORDER_LEN];
1979        int ret;
1980
1981        if (write)
1982                strncpy(saved_string, (char*)table->data,
1983                        NUMA_ZONELIST_ORDER_LEN);
1984        ret = proc_dostring(table, write, file, buffer, length, ppos);
1985        if (ret)
1986                return ret;
1987        if (write) {
1988                int oldval = user_zonelist_order;
1989                if (__parse_numa_zonelist_order((char*)table->data)) {
1990                        /*
1991                         * bogus value.  restore saved string
1992                         */
1993                        strncpy((char*)table->data, saved_string,
1994                                NUMA_ZONELIST_ORDER_LEN);
1995                        user_zonelist_order = oldval;
1996                } else if (oldval != user_zonelist_order)
1997                        build_all_zonelists();
1998        }
1999        return 0;
2000}
2001
2002
2003#define MAX_NODE_LOAD (num_online_nodes())
2004static int node_load[MAX_NUMNODES];
2005
2006/**
2007 * find_next_best_node - find the next node that should appear in a given node's fallback list
2008 * @node: node whose fallback list we're appending
2009 * @used_node_mask: nodemask_t of already used nodes
2010 *
2011 * We use a number of factors to determine which is the next node that should
2012 * appear on a given node's fallback list.  The node should not have appeared
2013 * already in @node's fallback list, and it should be the next closest node
2014 * according to the distance array (which contains arbitrary distance values
2015 * from each node to each node in the system).  It should also prefer
2016 * nodes with no CPUs, since presumably they'll have very little
2017 * allocation pressure on them otherwise.
2018 * It returns -1 if no node is found.
2019 */
2020static int find_next_best_node(int node, nodemask_t *used_node_mask)
2021{
2022        int n, val;
2023        int min_val = INT_MAX;
2024        int best_node = -1;
2025
2026        /* Use the local node if we haven't already */
2027        if (!node_isset(node, *used_node_mask)) {
2028                node_set(node, *used_node_mask);
2029                return node;
2030        }
2031
2032        for_each_node_state(n, N_HIGH_MEMORY) {
2033                cpumask_t tmp;
2034
2035                /* Don't want a node to appear more than once */
2036                if (node_isset(n, *used_node_mask))
2037                        continue;
2038
2039                /* Use the distance array to find the distance */
2040                val = node_distance(node, n);
2041
2042                /* Penalize nodes under us ("prefer the next node") */
2043                val += (n < node);
2044
2045                /* Give preference to headless and unused nodes */
2046                tmp = node_to_cpumask(n);
2047                if (!cpus_empty(tmp))
2048                        val += PENALTY_FOR_NODE_WITH_CPUS;
2049
2050                /* Slight preference for less loaded node */
2051                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2052                val += node_load[n];
2053
2054                if (val < min_val) {
2055                        min_val = val;
2056                        best_node = n;
2057                }
2058        }
2059
2060        if (best_node >= 0)
2061                node_set(best_node, *used_node_mask);
2062
2063        return best_node;
2064}
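
/*
 * Worked example with illustrative numbers: scoring node 1 from node 0 with
 * node_distance(0, 1) = 20 and node 1 owning CPUs gives
 *
 *	val = 20 + (1 < 0) + PENALTY_FOR_NODE_WITH_CPUS;
 *	val = val * (MAX_NODE_LOAD * MAX_NUMNODES) + node_load[1];
 *
 * The scaling keeps distance dominant while node_load[] only breaks ties
 * among otherwise equal candidates, giving the round-robin behaviour that
 * build_zonelists() sets up.
 */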
2065
2066
2067/*
2068 * Build zonelists ordered by node and zones within node.
2069 * This results in maximum locality--normal zone overflows into local
2070 * DMA zone, if any--but risks exhausting DMA zone.
2071 */
2072static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2073{
2074        enum zone_type i;
2075        int j;
2076        struct zonelist *zonelist;
2077
2078        for (i = 0; i < MAX_NR_ZONES; i++) {
2079                zonelist = pgdat->node_zonelists + i;
2080                for (j = 0; zonelist->zones[j] != NULL; j++)
2081                        ;
2082                j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2083                zonelist->zones[j] = NULL;
2084        }
2085}
2086
2087/*
2088 * Build gfp_thisnode zonelists
2089 */
2090static void build_thisnode_zonelists(pg_data_t *pgdat)
2091{
2092        enum zone_type i;
2093        int j;
2094        struct zonelist *zonelist;
2095
2096        for (i = 0; i < MAX_NR_ZONES; i++) {
2097                zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
2098                j = build_zonelists_node(pgdat, zonelist, 0, i);
2099                zonelist->zones[j] = NULL;
2100        }
2101}
2102
2103/*
2104 * Build zonelists ordered by zone and nodes within zones.
2105 * This conserves the DMA zone[s] until all Normal memory is
2106 * exhausted, but may overflow to a remote node while memory
2107 * still exists in the local DMA zone.
2108 */
2109static int node_order[MAX_NUMNODES];
2110
2111static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2112{
2113        enum zone_type i;
2114        int pos, j, node;
2115        int zone_type;          /* needs to be signed */
2116        struct zone *z;
2117        struct zonelist *zonelist;
2118
2119        for (i = 0; i < MAX_NR_ZONES; i++) {
2120                zonelist = pgdat->node_zonelists + i;
2121                pos = 0;
2122                for (zone_type = i; zone_type >= 0; zone_type--) {
2123                        for (j = 0; j < nr_nodes; j++) {
2124                                node = node_order[j];
2125                                z = &NODE_DATA(node)->node_zones[zone_type];
2126                                if (populated_zone(z)) {
2127                                        zonelist->zones[pos++] = z;
2128                                        check_highest_zone(zone_type);
2129                                }
2130                        }
2131                }
2132                zonelist->zones[pos] = NULL;
2133        }
2134}
2135
2136static int default_zonelist_order(void)
2137{
2138        int nid, zone_type;
2139        unsigned long low_kmem_size, total_size;
2140        struct zone *z;
2141        int average_size;
2142        /*
2143         * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2144         * If they are really small and used heavily, the system can fall
2145         * into OOM very easily.
2146         * This function detects the ZONE_DMA/DMA32 size and configures the zonelist order.
2147         */
2148        /* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone) */
2149        low_kmem_size = 0;
2150        total_size = 0;
2151        for_each_online_node(nid) {
2152                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2153                        z = &NODE_DATA(nid)->node_zones[zone_type];
2154                        if (populated_zone(z)) {
2155                                if (zone_type < ZONE_NORMAL)
2156                                        low_kmem_size += z->present_pages;
2157                                total_size += z->present_pages;
2158                        }
2159                }
2160        }
2161        if (!low_kmem_size ||  /* there is no DMA area. */
2162            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2163                return ZONELIST_ORDER_NODE;
2164        /*
2165         * Look into each node's configuration.
2166         * If there is a node whose DMA/DMA32 memory makes up a very large
2167         * share of its local memory, NODE order may be suitable.
2168         */
2169        average_size = total_size /
2170                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2171        for_each_online_node(nid) {
2172                low_kmem_size = 0;
2173                total_size = 0;
2174                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2175                        z = &NODE_DATA(nid)->node_zones[zone_type];
2176                        if (populated_zone(z)) {
2177                                if (zone_type < ZONE_NORMAL)
2178                                        low_kmem_size += z->present_pages;
2179                                total_size += z->present_pages;
2180                        }
2181                }
2182                if (low_kmem_size &&
2183                    total_size > average_size && /* ignore small node */
2184                    low_kmem_size > total_size * 70/100)
2185                        return ZONELIST_ORDER_NODE;
2186        }
2187        return ZONELIST_ORDER_ZONE;
2188}
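
/*
 * Worked example with illustrative numbers: a node with 512MB of DMA32 and
 * 128MB of Normal memory has low_kmem_size = 80% of total_size, above the
 * 70% threshold, so NODE ordering is chosen: allocations stay local even
 * though that eats into the small DMA/DMA32 area earlier.
 */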
2189
2190static void set_zonelist_order(void)
2191{
2192        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2193                current_zonelist_order = default_zonelist_order();
2194        else
2195                current_zonelist_order = user_zonelist_order;
2196}
2197
2198static void build_zonelists(pg_data_t *pgdat)
2199{
2200        int j, node, load;
2201        enum zone_type i;
2202        nodemask_t used_mask;
2203        int local_node, prev_node;
2204        struct zonelist *zonelist;
2205        int order = current_zonelist_order;
2206
2207        /* initialize zonelists */
2208        for (i = 0; i < MAX_ZONELISTS; i++) {
2209                zonelist = pgdat->node_zonelists + i;
2210                zonelist->zones[0] = NULL;
2211        }
2212
2213        /* NUMA-aware ordering of nodes */
2214        local_node = pgdat->node_id;
2215        load = num_online_nodes();
2216        prev_node = local_node;
2217        nodes_clear(used_mask);
2218
2219        memset(node_load, 0, sizeof(node_load));
2220        memset(node_order, 0, sizeof(node_order));
2221        j = 0;
2222
2223        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2224                int distance = node_distance(local_node, node);
2225
2226                /*
2227                 * If another node is sufficiently far away then it is better
2228                 * to reclaim pages in a zone before going off node.
2229                 */
2230                if (distance > RECLAIM_DISTANCE)
2231                        zone_reclaim_mode = 1;
2232
2233                /*
2234                 * We don't want to pressure a particular node,
2235                 * so we add a penalty to the first node in the same
2236                 * distance group to make the ordering round-robin.
2237                 */
2238                if (distance != node_distance(local_node, prev_node))
2239                        node_load[node] = load;
2240
2241                prev_node = node;
2242                load--;
2243                if (order == ZONELIST_ORDER_NODE)
2244                        build_zonelists_in_node_order(pgdat, node);
2245                else
2246                        node_order[j++] = node; /* remember order */
2247        }
2248
2249        if (order == ZONELIST_ORDER_ZONE) {
2250                /* calculate node order -- i.e., DMA last! */
2251                build_zonelists_in_zone_order(pgdat, j);
2252        }
2253
2254        build_thisnode_zonelists(pgdat);
2255}
2256
2257/* Construct the zonelist performance cache - see further mmzone.h */
2258static void build_zonelist_cache(pg_data_t *pgdat)
2259{
2260        int i;
2261
2262        for (i = 0; i < MAX_NR_ZONES; i++) {
2263                struct zonelist *zonelist;
2264                struct zonelist_cache *zlc;
2265                struct zone **z;
2266
2267                zonelist = pgdat->node_zonelists + i;
2268                zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2269                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2270                for (z = zonelist->zones; *z; z++)
2271                        zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
2272        }
2273}
2274
2275
2276#else   /* CONFIG_NUMA */
2277
2278static void set_zonelist_order(void)
2279{
2280        current_zonelist_order = ZONELIST_ORDER_ZONE;
2281}
2282
2283static void build_zonelists(pg_data_t *pgdat)
2284{
2285        int node, local_node;
2286        enum zone_type i, j;
2287
2288        local_node = pgdat->node_id;
2289        for (i = 0; i < MAX_NR_ZONES; i++) {
2290                struct zonelist *zonelist;
2291
2292                zonelist = pgdat->node_zonelists + i;
2293
2294                j = build_zonelists_node(pgdat, zonelist, 0, i);
2295                /*
2296                 * Now we build the zonelist so that it contains the zones
2297                 * of all the other nodes.
2298                 * We don't want to pressure a particular node, so when
2299                 * building the zones for node N, we make sure that the
2300                 * zones coming right after the local ones are those from
2301                 * node N+1 (wrapping back around to node 0).
2302                 */
2303                for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2304                        if (!node_online(node))
2305                                continue;
2306                        j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2307                }
2308                for (node = 0; node < local_node; node++) {
2309                        if (!node_online(node))
2310                                continue;
2311                        j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
2312                }
2313
2314                zonelist->zones[j] = NULL;
2315        }
2316}
2317
2318/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2319static void build_zonelist_cache(pg_data_t *pgdat)
2320{
2321        int i;
2322
2323        for (i = 0; i < MAX_NR_ZONES; i++)
2324                pgdat->node_zonelists[i].zlcache_ptr = NULL;
2325}
2326
2327#endif  /* CONFIG_NUMA */
2328
2329/* The return value is int just to match the stop_machine_run() callback type */
2330static int __build_all_zonelists(void *dummy)
2331{
2332        int nid;
2333
2334        for_each_online_node(nid) {
2335                pg_data_t *pgdat = NODE_DATA(nid);
2336
2337                build_zonelists(pgdat);
2338                build_zonelist_cache(pgdat);
2339        }
2340        return 0;
2341}
2342
2343void build_all_zonelists(void)
2344{
2345        set_zonelist_order();
2346
2347        if (system_state == SYSTEM_BOOTING) {
2348                __build_all_zonelists(NULL);
2349                cpuset_init_current_mems_allowed();
2350        } else {
2351                /* we have to stop all cpus to guarantee there is no user
2352                   of zonelist */
2353                stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
2354                /* cpuset refresh routine should be here */
2355        }
2356        vm_total_pages = nr_free_pagecache_pages();
2357        /*
2358         * Disable grouping by mobility if the number of pages in the
2359         * system is too low to allow the mechanism to work. It would be
2360         * more accurate, but expensive, to check this per-zone. This check is
2361         * also made on memory hot-add, so a system can start with mobility
2362         * grouping disabled and enable it later.
2363         */
2364        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2365                page_group_by_mobility_disabled = 1;
2366        else
2367                page_group_by_mobility_disabled = 0;
2368
2369        printk("Built %i zonelists in %s order, mobility grouping %s.  "
2370                "Total pages: %ld\n",
2371                        num_online_nodes(),
2372                        zonelist_order_name[current_zonelist_order],
2373                        page_group_by_mobility_disabled ? "off" : "on",
2374                        vm_total_pages);
2375#ifdef CONFIG_NUMA
2376        printk("Policy zone: %s\n", zone_names[policy_zone]);
2377#endif
2378}
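
/*
 * Worked example with illustrative numbers: with 1024-page pageblocks (4MB
 * with 4KB pages) and MIGRATE_TYPES = 5, the threshold above is 5120 pages,
 * i.e. 20MB; a system smaller than that runs with mobility grouping off.
 */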
2379
2380/*
2381 * Helper functions to size the waitqueue hash table.
2382 * Essentially these want to choose hash table sizes sufficiently
2383 * large so that collisions trying to wait on pages are rare.
2384 * But in fact, the number of active page waitqueues on typical
2385 * systems is ridiculously low, less than 200. So this is
2386 * conservative, even though it seems large.
2387 *
2388 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2389 * waitqueues, i.e. the size of the waitq table given the number of pages.
2390 */
2391#define PAGES_PER_WAITQUEUE     256
2392
2393#ifndef CONFIG_MEMORY_HOTPLUG
2394static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2395{
2396        unsigned long size = 1;
2397
2398        pages /= PAGES_PER_WAITQUEUE;
2399
2400        while (size < pages)
2401                size <<= 1;
2402
2403        /*
2404         * Once we have dozens or even hundreds of threads sleeping
2405         * on IO we've got bigger problems than wait queue collision.
2406         * Limit the size of the wait table to a reasonable size.
2407         */
2408        size = min(size, 4096UL);
2409
2410        return max(size, 4UL);
2411}
2412#else
2413/*
2414 * A zone's size might be changed by hot-add, so it is not possible to determine
2415 * a suitable size for its wait_table.  So we use the maximum size now.
2416 *
2417 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
2418 *
2419 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2420 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2421 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2422 *
2423 * With the traditional calculation above, the maximum number of entries is
2424 * reached once a zone's memory is (512K + 256) pages or more.  That equals:
2425 *
2426 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2427 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2428 *    powerpc (64K page size)             : =  (32G +16M)byte.
2429 */
2430static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2431{
2432        return 4096UL;
2433}
2434#endif
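
/*
 * Worked example with illustrative numbers (the !CONFIG_MEMORY_HOTPLUG
 * case): a 512MB zone of 4KB pages spans 131072 pages, and
 * 131072 / PAGES_PER_WAITQUEUE = 512, already a power of two, so the zone
 * gets 512 wait queues (the result is always clamped to [4, 4096]).
 */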
2435
2436/*
2437 * This is an integer logarithm so that shifts can be used later
2438 * to extract the more random high bits from the multiplicative
2439 * hash function before the remainder is taken.
2440 */
2441static inline unsigned long wait_table_bits(unsigned long size)
2442{
2443        return ffz(~size);
2444}
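
/*
 * For a power-of-two size, ffz(~size) is simply log2(size), e.g.
 * wait_table_bits(512) == 9.
 */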
2445
2446#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2447
2448/*
2449 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2450 * of blocks reserved is based on zone->pages_min. The memory within the
2451 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2452 * higher will lead to a bigger reserve which will get freed as contiguous
2453 * blocks as reclaim kicks in.
2454 */
2455static void setup_zone_migrate_reserve(struct zone *zone)
2456{
2457        unsigned long start_pfn, pfn, end_pfn;
2458        struct page *page;
2459        unsigned long reserve, block_migratetype;
2460
2461        /* Get the start pfn, end pfn and the number of blocks to reserve */
2462        start_pfn = zone->zone_start_pfn;
2463        end_pfn = start_pfn + zone->spanned_pages;
2464        reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2465                                                        pageblock_order;
2466
2467        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2468                if (!pfn_valid(pfn))
2469                        continue;
2470                page = pfn_to_page(pfn);
2471
2472                /* Blocks with reserved pages will never be freed, skip them. */
2473                if (PageReserved(page))
2474                        continue;
2475
2476                block_migratetype = get_pageblock_migratetype(page);
2477
2478                /* If this block is reserved, account for it */
2479                if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2480                        reserve--;
2481                        continue;
2482                }
2483
2484                /* Suitable for reserving if this block is movable */
2485                if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2486                        set_pageblock_migratetype(page, MIGRATE_RESERVE);
2487                        move_freepages_block(zone, page, MIGRATE_RESERVE);
2488                        reserve--;
2489                        continue;
2490                }
2491
2492                /*
2493                 * If the reserve is met and this is a previous reserved block,
2494                 * take it back
2495                 */
2496                if (block_migratetype == MIGRATE_RESERVE) {
2497                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2498                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
2499                }
2500        }
2501}
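
/*
 * Worked example with illustrative numbers: with zone->pages_min = 1500 and
 * pageblock_order = 10 (1024-page blocks, 4MB with 4KB pages), reserve =
 * roundup(1500, 1024) >> 10 = 2048 >> 10 = 2, so two pageblocks near the
 * start of the zone end up marked MIGRATE_RESERVE.
 */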
2502
2503/*
2504 * Initially all pages are reserved - free ones are freed
2505 * up by free_all_bootmem() once the early boot process is
2506 * done. Non-atomic initialization, single-pass.
2507 */
2508void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2509                unsigned long start_pfn, enum memmap_context context)
2510{
2511        struct page *page;
2512        unsigned long end_pfn = start_pfn + size;
2513        unsigned long pfn;
2514
2515        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2516                /*
2517                 * There can be holes in boot-time mem_map[]s
2518                 * handed to this function.  They do not
2519                 * exist on hotplugged memory.
2520                 */
2521                if (context == MEMMAP_EARLY) {
2522                        if (!early_pfn_valid(pfn))
2523                                continue;
2524                        if (!early_pfn_in_nid(pfn, nid))
2525                                continue;
2526                }
2527                page = pfn_to_page(pfn);
2528                set_page_links(page, zone, nid, pfn);
2529                init_page_count(page);
2530                reset_page_mapcount(page);
2531                SetPageReserved(page);
2532
2533                /*
2534                 * Mark the block movable so that blocks are reserved for
2535                 * movable at startup. This will force kernel allocations
2536                 * to reserve their blocks rather than leaking throughout
2537                 * the address space during boot when many long-lived
2538                 * kernel allocations are made. Later some blocks near
2539                 * the start are marked MIGRATE_RESERVE by
2540                 * setup_zone_migrate_reserve()
2541                 */
2542                if ((pfn & (pageblock_nr_pages-1)))
2543                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2544
2545                INIT_LIST_HEAD(&page->lru);
2546#ifdef WANT_PAGE_VIRTUAL
2547                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2548                if (!is_highmem_idx(zone))
2549                        set_page_address(page, __va(pfn << PAGE_SHIFT));
2550#endif
2551        }
2552}
2553
2554static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
2555                                struct zone *zone, unsigned long size)
2556{
2557        int order, t;
2558        for_each_migratetype_order(order, t) {
2559                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2560                zone->free_area[order].nr_free = 0;
2561        }
2562}
2563
2564#ifndef __HAVE_ARCH_MEMMAP_INIT
2565#define memmap_init(size, nid, zone, start_pfn) \
2566        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2567#endif
2568
2569static int zone_batchsize(struct zone *zone)
2570{
2571        int batch;
2572
2573        /*
2574         * The per-cpu-pages pools are set to around 1/1024th of the
2575         * size of the zone, but no more than half a megabyte.
2576         *
2577         * OK, so we don't know how big the CPU cache is.  So guess.
2578         */
2579        batch = zone->present_pages / 1024;
2580        if (batch * PAGE_SIZE > 512 * 1024)
2581                batch = (512 * 1024) / PAGE_SIZE;
2582        batch /= 4;             /* We effectively *= 4 below */
2583        if (batch < 1)
2584                batch = 1;
2585
2586        /*
2587         * Clamp the batch to a 2^n - 1 value. Having a power
2588         * of 2 value was found to be more likely to have
2589         * suboptimal cache aliasing properties in some cases.
2590         *
2591         * For example if 2 tasks are alternately allocating
2592         * batches of pages, one task can end up with a lot
2593         * of pages of one half of the possible page colors
2594         * and the other with pages of the other colors.
2595         */
2596        batch = (1 << (fls(batch + batch/2)-1)) - 1;
2597
2598        return batch;
2599}
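
/*
 * Worked example with illustrative numbers: a 1GB zone of 4KB pages has
 * present_pages = 262144, so batch starts at 256; that is more than 512KB
 * worth of pages, so it drops to 128, and the /4 leaves 32.  The final
 * rounding gives (1 << (fls(32 + 16) - 1)) - 1 = 31, so pages move between
 * the per-cpu lists and the buddy lists 31 at a time.
 */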
2600
2601inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2602{
2603        struct per_cpu_pages *pcp;
2604
2605        memset(p, 0, sizeof(*p));
2606
2607        pcp = &p->pcp[0];               /* hot */
2608        pcp->count = 0;
2609        pcp->high = 6 * batch;
2610        pcp->batch = max(1UL, 1 * batch);
2611        INIT_LIST_HEAD(&pcp->list);
2612
2613        pcp = &p->pcp[1];               /* cold */
2614        pcp->count = 0;
2615        pcp->high = 2 * batch;
2616        pcp->batch = max(1UL, batch/2);
2617        INIT_LIST_HEAD(&pcp->list);
2618}
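
/*
 * Continuing the illustrative batch = 31 from zone_batchsize() above: the
 * hot list is trimmed back to the buddy allocator once it exceeds
 * high = 186 pages and transfers 31 pages at a time, while the cold list
 * uses high = 62 with a batch of 15.
 */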
2619
2620/*
2621 * setup_pagelist_highmark() sets the high-water mark of the hot
2622 * per-cpu pagelist in pageset p to the value high.
2623 */
2624
2625static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2626                                unsigned long high)
2627{
2628        struct per_cpu_pages *pcp;
2629
2630        pcp = &p->pcp[0]; /* hot list */
2631        pcp->high = high;
2632        pcp->batch = max(1UL, high/4);
2633        if ((high/4) > (PAGE_SHIFT * 8))
2634                pcp->batch = PAGE_SHIFT * 8;
2635}
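
/*
 * Worked example with illustrative numbers: for a 262144-page zone with
 * percpu_pagelist_fraction = 8, process_zones() below passes
 * high = 262144 / 8 = 32768; high / 4 = 8192 exceeds PAGE_SHIFT * 8
 * (96 with 4KB pages), so the refill batch is capped at 96 pages.
 */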
2636
2637
2638#ifdef CONFIG_NUMA
2639/*
2640 * Boot pageset table. One per cpu which is going to be used for all
2641 * zones and all nodes. The parameters will be set in such a way
2642 * that an item put on a list will immediately be handed over to
2643 * the buddy list. This is safe since pageset manipulation is done
2644 * with interrupts disabled.
2645 *
2646 * Some NUMA counter updates may also be caught by the boot pagesets.
2647 *
2648 * The boot_pagesets must be kept even after bootup is complete for
2649 * unused processors and/or zones. They do play a role for bootstrapping
2650 * hotplugged processors.
2651 *
2652 * zoneinfo_show() and maybe other functions do
2653 * not check if the processor is online before following the pageset pointer.
2654 * Other parts of the kernel may not check if the zone is available.
2655 */
2656static struct per_cpu_pageset boot_pageset[NR_CPUS];
2657
2658/*
2659 * Dynamically allocate memory for the
2660 * per cpu pageset array in struct zone.
2661 */
2662static int __cpuinit process_zones(int cpu)
2663{
2664        struct zone *zone, *dzone;
2665        int node = cpu_to_node(cpu);
2666
2667        node_set_state(node, N_CPU);    /* this node has a cpu */
2668
2669        for_each_zone(zone) {
2670
2671                if (!populated_zone(zone))
2672                        continue;
2673
2674                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2675                                         GFP_KERNEL, node);
2676                if (!zone_pcp(zone, cpu))
2677                        goto bad;
2678
2679                setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2680
2681                if (percpu_pagelist_fraction)
2682                        setup_pagelist_highmark(zone_pcp(zone, cpu),
2683                                (zone->present_pages / percpu_pagelist_fraction));
2684        }
2685
2686        return 0;
2687bad:
2688        for_each_zone(dzone) {
2689                if (!populated_zone(dzone))
2690                        continue;
2691                if (dzone == zone)
2692                        break;
2693                kfree(zone_pcp(dzone, cpu));
2694                zone_pcp(dzone, cpu) = NULL;
2695        }
2696        return -ENOMEM;
2697}
2698
2699static inline void free_zone_pagesets(int cpu)
2700{
2701        struct zone *zone;
2702
2703        for_each_zone(zone) {
2704                struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2705
2706                /* Free per_cpu_pageset if it is slab allocated */
2707                if (pset != &boot_pageset[cpu])
2708                        kfree(pset);
2709                zone_pcp(zone, cpu) = NULL;
2710        }
2711}
2712
2713static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2714                unsigned long action,
2715                void *hcpu)
2716{
2717        int cpu = (long)hcpu;
2718        int ret = NOTIFY_OK;
2719
2720        switch (action) {
2721        case CPU_UP_PREPARE:
2722        case CPU_UP_PREPARE_FROZEN:
2723                if (process_zones(cpu))
2724                        ret = NOTIFY_BAD;
2725                break;
2726        case CPU_UP_CANCELED:
2727        case CPU_UP_CANCELED_FROZEN:
2728        case CPU_DEAD:
2729        case CPU_DEAD_FROZEN:
2730                free_zone_pagesets(cpu);
2731                break;
2732        default:
2733                break;
2734        }
2735        return ret;
2736}
2737
2738static struct notifier_block __cpuinitdata pageset_notifier =
2739        { &pageset_cpuup_callback, NULL, 0 };
2740
2741void __init setup_per_cpu_pageset(void)
2742{
2743        int err;
2744
2745        /* Initialize per_cpu_pageset for cpu 0.
2746         * A cpuup callback will do this for every cpu
2747         * as it comes online
2748         */
2749        err = process_zones(smp_processor_id());
2750        BUG_ON(err);
2751        register_cpu_notifier(&pageset_notifier);
2752}
2753
2754#endif
2755
2756static noinline __init_refok
2757int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2758{
2759        int i;
2760        struct pglist_data *pgdat = zone->zone_pgdat;
2761        size_t alloc_size;
2762
2763        /*
2764         * The per-page waitqueue mechanism uses hashed waitqueues
2765         * per zone.
2766         */
2767        zone->wait_table_hash_nr_entries =
2768                 wait_table_hash_nr_entries(zone_size_pages);
2769        zone->wait_table_bits =
2770                wait_table_bits(zone->wait_table_hash_nr_entries);
2771        alloc_size = zone->wait_table_hash_nr_entries
2772                                        * sizeof(wait_queue_head_t);
2773
2774        if (system_state == SYSTEM_BOOTING) {
2775                zone->wait_table = (wait_queue_head_t *)
2776                        alloc_bootmem_node(pgdat, alloc_size);
2777        } else {
2778                /*
2779                 * This case means that a zone whose size was 0 gets new memory
2780                 * via memory hot-add.
2781                 * But it may also be that a whole new node was hot-added.  In
2782                 * that case vmalloc() cannot yet use the new node's memory, even
2783                 * though this wait_table would ideally live on that new node
2784                 * as well.
2785                 * Making use of the new node's memory here will require further
2786                 * work.
2787                 */
2788                zone->wait_table = vmalloc(alloc_size);
2789        }
2790        if (!zone->wait_table)
2791                return -ENOMEM;
2792
2793        for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2794                init_waitqueue_head(zone->wait_table + i);
2795
2796        return 0;
2797}
2798
2799static __meminit void zone_pcp_init(struct zone *zone)
2800{
2801        int cpu;
2802        unsigned long batch = zone_batchsize(zone);
2803
2804        for (cpu = 0; cpu < NR_CPUS; cpu++) {
2805#ifdef CONFIG_NUMA
2806                /* Early boot. Slab allocator not functional yet */
2807                zone_pcp(zone, cpu) = &boot_pageset[cpu];
2808                setup_pageset(&boot_pageset[cpu], 0);
2809#else
2810                setup_pageset(zone_pcp(zone, cpu), batch);
2811#endif
2812        }
2813        if (zone->present_pages)
2814                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2815                        zone->name, zone->present_pages, batch);
2816}
2817
2818__meminit int init_currently_empty_zone(struct zone *zone,
2819                                        unsigned long zone_start_pfn,
2820                                        unsigned long size,
2821                                        enum memmap_context context)
2822{
2823        struct pglist_data *pgdat = zone->zone_pgdat;
2824        int ret;
2825        ret = zone_wait_table_init(zone, size);
2826        if (ret)
2827                return ret;
2828        pgdat->nr_zones = zone_idx(zone) + 1;
2829
2830        zone->zone_start_pfn = zone_start_pfn;
2831
2832        memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
2833
2834        zone_init_free_lists(pgdat, zone, zone->spanned_pages);
2835
2836        return 0;
2837}
2838
2839#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2840/*
2841 * Basic iterator support. Return the first range of PFNs for a node.
2842 * Note: nid == MAX_NUMNODES returns the first region regardless of node.
2843 */
2844static int __meminit first_active_region_index_in_nid(int nid)
2845{
2846        int i;
2847
2848        for (i = 0; i < nr_nodemap_entries; i++)
2849                if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2850                        return i;
2851
2852        return -1;
2853}
2854
2855/*
2856 * Basic iterator support. Return the next active range of PFNs for a node.
2857 * Note: nid == MAX_NUMNODES returns the next region regardless of node.
2858 */
2859static int __meminit next_active_region_index_in_nid(int index, int nid)
2860{
2861        for (index = index + 1; index < nr_nodemap_entries; index++)
2862                if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2863                        return index;
2864
2865        return -1;
2866}
2867
2868#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2869/*
2870 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2871 * Architectures may implement their own version but if add_active_range()
2872 * was used and there are no special requirements, this is a convenient
2873 * alternative
2874 */
2875int __meminit early_pfn_to_nid(unsigned long pfn)
2876{
2877        int i;
2878
2879        for (i = 0; i < nr_nodemap_entries; i++) {
2880                unsigned long start_pfn = early_node_map[i].start_pfn;
2881                unsigned long end_pfn = early_node_map[i].end_pfn;
2882
2883                if (start_pfn <= pfn && pfn < end_pfn)
2884                        return early_node_map[i].nid;
2885        }
2886
2887        return 0;
2888}
2889#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2890
2891/* Basic iterator support to walk early_node_map[] */
2892#define for_each_active_range_index_in_nid(i, nid) \
2893        for (i = first_active_region_index_in_nid(nid); i != -1; \
2894                                i = next_active_region_index_in_nid(i, nid))
2895
2896/**
2897 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2898 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2899 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2900 *
2901 * If an architecture guarantees that all ranges registered with
2902 * add_active_ranges() contain no holes and may be freed, this
2903 * function may be used instead of calling free_bootmem() manually.
2904 */
2905void __init free_bootmem_with_active_regions(int nid,
2906                                                unsigned long max_low_pfn)
2907{
2908        int i;
2909
2910        for_each_active_range_index_in_nid(i, nid) {
2911                unsigned long size_pages = 0;
2912                unsigned long end_pfn = early_node_map[i].end_pfn;
2913
2914                if (early_node_map[i].start_pfn >= max_low_pfn)
2915                        continue;
2916
2917                if (end_pfn > max_low_pfn)
2918                        end_pfn = max_low_pfn;
2919
2920                size_pages = end_pfn - early_node_map[i].start_pfn;
2921                free_bootmem_node(NODE_DATA(early_node_map[i].nid),
2922                                PFN_PHYS(early_node_map[i].start_pfn),
2923                                size_pages << PAGE_SHIFT);
2924        }
2925}
2926
2927/**
2928 * sparse_memory_present_with_active_regions - Call memory_present for each active range
2929 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
2930 *
2931 * If an architecture guarantees that all ranges registered with
2932 * add_active_ranges() contain no holes, this
2933 * function may be used instead of calling memory_present() manually.
2934 */
2935void __init sparse_memory_present_with_active_regions(int nid)
2936{
2937        int i;
2938
2939        for_each_active_range_index_in_nid(i, nid)
2940                memory_present(early_node_map[i].nid,
2941                                early_node_map[i].start_pfn,
2942                                early_node_map[i].end_pfn);
2943}
2944
2945/**
2946 * push_node_boundaries - Push node boundaries to at least the requested boundary
2947 * @nid: The nid of the node to push the boundary for
2948 * @start_pfn: The start pfn of the node
2949 * @end_pfn: The end pfn of the node
2950 *
2951 * In reserve-based hot-add, a mem_map is allocated that remains unused until hotadd
2952 * time. Specifically, on x86_64, SRAT will report ranges that can potentially
2953 * be hotplugged even though no physical memory exists. This function allows
2954 * an arch to push out the node boundaries so mem_map is allocated that can
2955 * be used later.
2956 */
2957#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
2958void __init push_node_boundaries(unsigned int nid,
2959                unsigned long start_pfn, unsigned long end_pfn)
2960{
2961        printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
2962                        nid, start_pfn, end_pfn);
2963
2964        /* Initialise the boundary for this node if necessary */
2965        if (node_boundary_end_pfn[nid] == 0)
2966                node_boundary_start_pfn[nid] = -1UL;
2967
2968        /* Update the boundaries */
2969        if (node_boundary_start_pfn[nid] > start_pfn)
2970                node_boundary_start_pfn[nid] = start_pfn;
2971        if (node_boundary_end_pfn[nid] < end_pfn)
2972                node_boundary_end_pfn[nid] = end_pfn;
2973}
2974
2975/* If necessary, push the node boundary out for reserve hotadd */
2976static void __meminit account_node_boundary(unsigned int nid,
2977                unsigned long *start_pfn, unsigned long *end_pfn)
2978{
2979        printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
2980                        nid, *start_pfn, *end_pfn);
2981
2982        /* Return if boundary information has not been provided */
2983        if (node_boundary_end_pfn[nid] == 0)
2984                return;
2985
2986        /* Check the boundaries and update if necessary */
2987        if (node_boundary_start_pfn[nid] < *start_pfn)
2988                *start_pfn = node_boundary_start_pfn[nid];
2989        if (node_boundary_end_pfn[nid] > *end_pfn)
2990                *end_pfn = node_boundary_end_pfn[nid];
2991}
2992#else
2993void __init push_node_boundaries(unsigned int nid,
2994                unsigned long start_pfn, unsigned long end_pfn) {}
2995
2996static void __meminit account_node_boundary(unsigned int nid,
2997                unsigned long *start_pfn, unsigned long *end_pfn) {}
2998#endif
2999
3000
3001/**
3002 * get_pfn_range_for_nid - Return the start and end page frames for a node
3003 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3004 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3005 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3006 *
3007 * It returns the start and end page frame of a node based on information
3008 * provided by an arch calling add_active_range(). If called for a node
3009 * with no available memory, a warning is printed and the start and end
3010 * PFNs will be 0.
3011 */
3012void __meminit get_pfn_range_for_nid(unsigned int nid,
3013                        unsigned long *start_pfn, unsigned long *end_pfn)
3014{
3015        int i;
3016        *start_pfn = -1UL;
3017        *end_pfn = 0;
3018
3019        for_each_active_range_index_in_nid(i, nid) {
3020                *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3021                *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3022        }
3023
3024        if (*start_pfn == -1UL)
3025                *start_pfn = 0;
3026
3027        /* Push the node boundaries out if requested */
3028        account_node_boundary(nid, start_pfn, end_pfn);
3029}
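/*
 * Example (hypothetical values): if node 1 registered active ranges
 * [256, 512) and [1024, 2048), then
 *
 *        get_pfn_range_for_nid(1, &start_pfn, &end_pfn);
 *
 * yields start_pfn == 256 and end_pfn == 2048; the hole in between is
 * still part of the spanned range.
 */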
3030
3031/*
3032 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3033 * assumption is made that zones within a node are ordered in monotonically
3034 * increasing memory addresses so that the "highest" populated zone is used.
3035 */
3036void __init find_usable_zone_for_movable(void)
3037{
3038        int zone_index;
3039        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3040                if (zone_index == ZONE_MOVABLE)
3041                        continue;
3042
3043                if (arch_zone_highest_possible_pfn[zone_index] >
3044                                arch_zone_lowest_possible_pfn[zone_index])
3045                        break;
3046        }
3047
3048        VM_BUG_ON(zone_index == -1);
3049        movable_zone = zone_index;
3050}
3051
3052/*
3053 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3054 * because its size is independent of the architecture. Unlike the other zones,
3055 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3056 * in each node depending on the size of each node and how evenly kernelcore
3057 * is distributed. This helper function adjusts the zone ranges
3058 * provided by the architecture for a given node by using the end of the
3059 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3060 * zones within a node are in order of monotonically increasing memory addresses.
3061 */
3062void __meminit adjust_zone_range_for_zone_movable(int nid,
3063                                        unsigned long zone_type,
3064                                        unsigned long node_start_pfn,
3065                                        unsigned long node_end_pfn,
3066                                        unsigned long *zone_start_pfn,
3067                                        unsigned long *zone_end_pfn)
3068{
3069        /* Only adjust if ZONE_MOVABLE is on this node */
3070        if (zone_movable_pfn[nid]) {
3071                /* Size ZONE_MOVABLE */
3072                if (zone_type == ZONE_MOVABLE) {
3073                        *zone_start_pfn = zone_movable_pfn[nid];
3074                        *zone_end_pfn = min(node_end_pfn,
3075                                arch_zone_highest_possible_pfn[movable_zone]);
3076
3077                /* Adjust for ZONE_MOVABLE starting within this range */
3078                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3079                                *zone_end_pfn > zone_movable_pfn[nid]) {
3080                        *zone_end_pfn = zone_movable_pfn[nid];
3081
3082                /* Check if this whole range is within ZONE_MOVABLE */
3083                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3084                        *zone_start_pfn = *zone_end_pfn;
3085        }
3086}
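/*
 * Worked example (hypothetical values): with zone_movable_pfn[nid] == 600
 * on a node spanning [0, 1000):
 *  - a zone spanning [0, 1000) is clipped to [0, 600) because ZONE_MOVABLE
 *    starts inside it;
 *  - a zone spanning [700, 1000) lies entirely above the boundary and
 *    collapses to the empty range [1000, 1000);
 *  - ZONE_MOVABLE itself becomes [600, 1000), assuming the node end is
 *    below the limit of the highest usable zone.
 */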
3087
3088/*
3089 * Return the number of pages a zone spans in a node, including holes
3090 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3091 */
3092static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3093                                        unsigned long zone_type,
3094                                        unsigned long *ignored)
3095{
3096        unsigned long node_start_pfn, node_end_pfn;
3097        unsigned long zone_start_pfn, zone_end_pfn;
3098
3099        /* Get the start and end of the node and zone */
3100        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3101        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3102        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3103        adjust_zone_range_for_zone_movable(nid, zone_type,
3104                                node_start_pfn, node_end_pfn,
3105                                &zone_start_pfn, &zone_end_pfn);
3106
3107        /* Check that this node has pages within the zone's required range */
3108        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3109                return 0;
3110
3111        /* Move the zone boundaries inside the node if necessary */
3112        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3113        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3114
3115        /* Return the spanned pages */
3116        return zone_end_pfn - zone_start_pfn;
3117}
3118
3119/*
3120 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3121 * then all holes in the requested range will be accounted for.
3122 */
3123unsigned long __meminit __absent_pages_in_range(int nid,
3124                                unsigned long range_start_pfn,
3125                                unsigned long range_end_pfn)
3126{
3127        int i = 0;
3128        unsigned long prev_end_pfn = 0, hole_pages = 0;
3129        unsigned long start_pfn;
3130
3131        /* Find the first active range of pfns in the node */
3132        i = first_active_region_index_in_nid(nid);
3133        if (i == -1)
3134                return 0;
3135
3136        prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3137
3138        /* Account for ranges before physical memory on this node */
3139        if (early_node_map[i].start_pfn > range_start_pfn)
3140                hole_pages = prev_end_pfn - range_start_pfn;
3141
3142        /* Find all holes for the zone within the node */
3143        for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3144
3145                /* No need to continue if prev_end_pfn is outside the zone */
3146                if (prev_end_pfn >= range_end_pfn)
3147                        break;
3148
3149                /* Make sure the end of the zone is not within the hole */
3150                start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3151                prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3152
3153                /* Update the hole size count and move on */
3154                if (start_pfn > range_start_pfn) {
3155                        BUG_ON(prev_end_pfn > start_pfn);
3156                        hole_pages += start_pfn - prev_end_pfn;
3157                }
3158                prev_end_pfn = early_node_map[i].end_pfn;
3159        }
3160
3161        /* Account for ranges past physical memory on this node */
3162        if (range_end_pfn > prev_end_pfn)
3163                hole_pages += range_end_pfn -
3164                                max(range_start_pfn, prev_end_pfn);
3165
3166        return hole_pages;
3167}
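/*
 * Worked example (hypothetical values): for the range [0, 1000) on a node
 * with active ranges [100, 200) and [300, 400), the holes are [0, 100),
 * [200, 300) and [400, 1000), so this returns 100 + 100 + 600 = 800 pages.
 */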
3168
3169/**
3170 * absent_pages_in_range - Return number of page frames in holes within a range
3171 * @start_pfn: The start PFN to start searching for holes
3172 * @end_pfn: The end PFN to stop searching for holes
3173 *
3174 * It returns the number of page frames in memory holes within a range.
3175 */
3176unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3177                                                        unsigned long end_pfn)
3178{
3179        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3180}
3181
3182/* Return the number of page frames in holes in a zone on a node */
3183static unsigned long __meminit zone_absent_pages_in_node(int nid,
3184                                        unsigned long zone_type,
3185                                        unsigned long *ignored)
3186{
3187        unsigned long node_start_pfn, node_end_pfn;
3188        unsigned long zone_start_pfn, zone_end_pfn;
3189
3190        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3191        zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3192                                                        node_start_pfn);
3193        zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3194                                                        node_end_pfn);
3195
3196        adjust_zone_range_for_zone_movable(nid, zone_type,
3197                        node_start_pfn, node_end_pfn,
3198                        &zone_start_pfn, &zone_end_pfn);
3199        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3200}
3201
3202#else
3203static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3204                                        unsigned long zone_type,
3205                                        unsigned long *zones_size)
3206{
3207        return zones_size[zone_type];
3208}
3209
3210static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3211                                                unsigned long zone_type,
3212                                                unsigned long *zholes_size)
3213{
3214        if (!zholes_size)
3215                return 0;
3216
3217        return zholes_size[zone_type];
3218}
3219
3220#endif
3221
3222static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3223                unsigned long *zones_size, unsigned long *zholes_size)
3224{
3225        unsigned long realtotalpages, totalpages = 0;
3226        enum zone_type i;
3227
3228        for (i = 0; i < MAX_NR_ZONES; i++)
3229                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3230                                                                zones_size);
3231        pgdat->node_spanned_pages = totalpages;
3232
3233        realtotalpages = totalpages;
3234        for (i = 0; i < MAX_NR_ZONES; i++)
3235                realtotalpages -=
3236                        zone_absent_pages_in_node(pgdat->node_id, i,
3237                                                                zholes_size);
3238        pgdat->node_present_pages = realtotalpages;
3239        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3240                                                        realtotalpages);
3241}
3242
3243#ifndef CONFIG_SPARSEMEM
3244/*
3245 * Calculate the size of the zone->blockflags rounded to an unsigned long
3246 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3247 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3248 * round what is now in bits to nearest long in bits, then return it in
3249 * bytes.
3250 */
3251static unsigned long __init usemap_size(unsigned long zonesize)
3252{
3253        unsigned long usemapsize;
3254
3255        usemapsize = roundup(zonesize, pageblock_nr_pages);
3256        usemapsize = usemapsize >> pageblock_order;
3257        usemapsize *= NR_PAGEBLOCK_BITS;
3258        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3259
3260        return usemapsize / 8;
3261}
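/*
 * Worked example (hypothetical values): assuming pageblock_order == 10
 * (so pageblock_nr_pages == 1024), NR_PAGEBLOCK_BITS == 4 and 64-bit
 * longs, a zone of 3000 pages rounds up to 3072 pages, i.e. 3 pageblocks
 * needing 12 bits, which rounds up to a single unsigned long:
 * usemap_size() returns 8 bytes.
 */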
3262
3263static void __init setup_usemap(struct pglist_data *pgdat,
3264                                struct zone *zone, unsigned long zonesize)
3265{
3266        unsigned long usemapsize = usemap_size(zonesize);
3267        zone->pageblock_flags = NULL;
3268        if (usemapsize) {
3269                zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3270                memset(zone->pageblock_flags, 0, usemapsize);
3271        }
3272}
3273#else
3274static inline void setup_usemap(struct pglist_data *pgdat,
3275                                struct zone *zone, unsigned long zonesize) {}
3276#endif /* CONFIG_SPARSEMEM */
3277
3278#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3279
3280/* Return a sensible default order for the pageblock size. */
3281static inline int pageblock_default_order(void)
3282{
3283        if (HPAGE_SHIFT > PAGE_SHIFT)
3284                return HUGETLB_PAGE_ORDER;
3285
3286        return MAX_ORDER-1;
3287}
3288
3289/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3290static inline void __init set_pageblock_order(unsigned int order)
3291{
3292        /* Check that pageblock_order has not already been set up */
3293        if (pageblock_order)
3294                return;
3295
3296        /*
3297         * Assume the largest contiguous order of interest is a huge page.
3298         * This value may be variable depending on boot parameters on IA64
3299         */
3300        pageblock_order = order;
3301}
3302#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3303
3304/*
3305 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3306 * and pageblock_default_order() are unused as pageblock_order is set
3307 * at compile-time. See include/linux/pageblock-flags.h for the values of
3308 * pageblock_order based on the kernel config
3309 */
3310static inline int pageblock_default_order(unsigned int order)
3311{
3312        return MAX_ORDER-1;
3313}
3314#define set_pageblock_order(x)  do {} while (0)
3315
3316#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3317
3318/*
3319 * Set up the zone data structures:
3320 *   - mark all pages reserved
3321 *   - mark all memory queues empty
3322 *   - clear the memory bitmaps
3323 */
3324static void __meminit free_area_init_core(struct pglist_data *pgdat,
3325                unsigned long *zones_size, unsigned long *zholes_size)
3326{
3327        enum zone_type j;
3328        int nid = pgdat->node_id;
3329        unsigned long zone_start_pfn = pgdat->node_start_pfn;
3330        int ret;
3331
3332        pgdat_resize_init(pgdat);
3333        pgdat->nr_zones = 0;
3334        init_waitqueue_head(&pgdat->kswapd_wait);
3335        pgdat->kswapd_max_order = 0;
3336        
3337        for (j = 0; j < MAX_NR_ZONES; j++) {
3338                struct zone *zone = pgdat->node_zones + j;
3339                unsigned long size, realsize, memmap_pages;
3340
3341                size = zone_spanned_pages_in_node(nid, j, zones_size);
3342                realsize = size - zone_absent_pages_in_node(nid, j,
3343                                                                zholes_size);
3344
3345                /*
3346                 * Adjust realsize so that it accounts for how much memory
3347                 * is used by this zone for memmap. This affects the watermark
3348                 * and per-cpu initialisations
3349                 */
3350                memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
3351                if (realsize >= memmap_pages) {
3352                        realsize -= memmap_pages;
3353                        printk(KERN_DEBUG
3354                                "  %s zone: %lu pages used for memmap\n",
3355                                zone_names[j], memmap_pages);
3356                } else
3357                        printk(KERN_WARNING
3358                                "  %s zone: %lu pages exceeds realsize %lu\n",
3359                                zone_names[j], memmap_pages, realsize);
3360
3361                /* Account for reserved pages */
3362                if (j == 0 && realsize > dma_reserve) {
3363                        realsize -= dma_reserve;
3364                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
3365                                        zone_names[0], dma_reserve);
3366                }
3367
3368                if (!is_highmem_idx(j))
3369                        nr_kernel_pages += realsize;
3370                nr_all_pages += realsize;
3371
3372                zone->spanned_pages = size;
3373                zone->present_pages = realsize;
3374#ifdef CONFIG_NUMA
3375                zone->node = nid;
3376                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3377                                                / 100;
3378                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3379#endif
3380                zone->name = zone_names[j];
3381                spin_lock_init(&zone->lock);
3382                spin_lock_init(&zone->lru_lock);
3383                zone_seqlock_init(zone);
3384                zone->zone_pgdat = pgdat;
3385
3386                zone->prev_priority = DEF_PRIORITY;
3387
3388                zone_pcp_init(zone);
3389                INIT_LIST_HEAD(&zone->active_list);
3390                INIT_LIST_HEAD(&zone->inactive_list);
3391                zone->nr_scan_active = 0;
3392                zone->nr_scan_inactive = 0;
3393                zap_zone_vm_stats(zone);
3394                zone->flags = 0;
3395                if (!size)
3396                        continue;
3397
3398                set_pageblock_order(pageblock_default_order());
3399                setup_usemap(pgdat, zone, size);
3400                ret = init_currently_empty_zone(zone, zone_start_pfn,
3401                                                size, MEMMAP_EARLY);
3402                BUG_ON(ret);
3403                zone_start_pfn += size;
3404        }
3405}
3406
3407static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3408{
3409        /* Skip empty nodes */
3410        if (!pgdat->node_spanned_pages)
3411                return;
3412
3413#ifdef CONFIG_FLAT_NODE_MEM_MAP
3414        /* ia64 gets its own node_mem_map, before this, without bootmem */
3415        if (!pgdat->node_mem_map) {
3416                unsigned long size, start, end;
3417                struct page *map;
3418
3419                /*
3420                 * The zone's endpoints aren't required to be MAX_ORDER
3421                 * aligned, but the node_mem_map endpoints must be, in
3422                 * order for the buddy allocator to function correctly.
3423                 */
3424                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3425                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3426                end = ALIGN(end, MAX_ORDER_NR_PAGES);
3427                size =  (end - start) * sizeof(struct page);
3428                map = alloc_remap(pgdat->node_id, size);
3429                if (!map)
3430                        map = alloc_bootmem_node(pgdat, size);
3431                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3432        }
3433#ifndef CONFIG_NEED_MULTIPLE_NODES
3434        /*
3435         * With no DISCONTIG, the global mem_map is just set as node 0's
3436         */
3437        if (pgdat == NODE_DATA(0)) {
3438                mem_map = NODE_DATA(0)->node_mem_map;
3439#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3440                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3441                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3442#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3443        }
3444#endif
3445#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3446}
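/*
 * Worked example (hypothetical values): with MAX_ORDER_NR_PAGES == 2048,
 * a node starting at pfn 1000 and spanning 5000 pages gets start == 0 and
 * end == 8192, so the map covers pfns [0, 8192) and node_mem_map points
 * 1000 struct pages into the allocation.
 */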
3447
3448void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
3449                unsigned long *zones_size, unsigned long node_start_pfn,
3450                unsigned long *zholes_size)
3451{
3452        pgdat->node_id = nid;
3453        pgdat->node_start_pfn = node_start_pfn;
3454        calculate_node_totalpages(pgdat, zones_size, zholes_size);
3455
3456        alloc_node_mem_map(pgdat);
3457
3458        free_area_init_core(pgdat, zones_size, zholes_size);
3459}
3460
3461#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3462
3463#if MAX_NUMNODES > 1
3464/*
3465 * Figure out the number of possible node ids.
3466 */
3467static void __init setup_nr_node_ids(void)
3468{
3469        unsigned int node;
3470        unsigned int highest = 0;
3471
3472        for_each_node_mask(node, node_possible_map)
3473                highest = node;
3474        nr_node_ids = highest + 1;
3475}
3476#else
3477static inline void setup_nr_node_ids(void)
3478{
3479}
3480#endif
3481
3482/**
3483 * add_active_range - Register a range of PFNs backed by physical memory
3484 * @nid: The node ID the range resides on
3485 * @start_pfn: The start PFN of the available physical memory
3486 * @end_pfn: The end PFN of the available physical memory
3487 *
3488 * These ranges are stored in an early_node_map[] and later used by
3489 * free_area_init_nodes() to calculate zone sizes and holes. If the
3490 * range spans a memory hole, it is up to the architecture to ensure
3491 * the memory is not freed by the bootmem allocator. If possible
3492 * the range being registered will be merged with existing ranges.
3493 */
3494void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3495                                                unsigned long end_pfn)
3496{
3497        int i;
3498
3499        printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
3500                          "%d entries of %d used\n",
3501                          nid, start_pfn, end_pfn,
3502                          nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3503
3504        /* Merge with existing active regions if possible */
3505        for (i = 0; i < nr_nodemap_entries; i++) {
3506                if (early_node_map[i].nid != nid)
3507                        continue;
3508
3509                /* Skip if an existing region covers this new one */
3510                if (start_pfn >= early_node_map[i].start_pfn &&
3511                                end_pfn <= early_node_map[i].end_pfn)
3512                        return;
3513
3514                /* Merge forward if suitable */
3515                if (start_pfn <= early_node_map[i].end_pfn &&
3516                                end_pfn > early_node_map[i].end_pfn) {
3517                        early_node_map[i].end_pfn = end_pfn;
3518                        return;
3519                }
3520
3521                /* Merge backward if suitable */
3522                if (start_pfn < early_node_map[i].end_pfn &&
3523                                end_pfn >= early_node_map[i].start_pfn) {
3524                        early_node_map[i].start_pfn = start_pfn;
3525                        return;
3526                }
3527        }
3528
3529        /* Check that early_node_map is large enough */
3530        if (i >= MAX_ACTIVE_REGIONS) {
3531                printk(KERN_CRIT "More than %d memory regions, truncating\n",
3532                                                        MAX_ACTIVE_REGIONS);
3533                return;
3534        }
3535
3536        early_node_map[i].nid = nid;
3537        early_node_map[i].start_pfn = start_pfn;
3538        early_node_map[i].end_pfn = end_pfn;
3539        nr_nodemap_entries = i + 1;
3540}
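/*
 * Example (hypothetical values): on node 0,
 *
 *        add_active_range(0, 0, 100);
 *        add_active_range(0, 100, 200);        (merges forward to [0, 200))
 *        add_active_range(0, 50, 150);         (already covered, ignored)
 *
 * leaves a single early_node_map[] entry spanning [0, 200).
 */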
3541
3542/**
3543 * shrink_active_range - Shrink an existing registered range of PFNs
3544 * @nid: The node id the range is on that should be shrunk
3545 * @old_end_pfn: The old end PFN of the range
3546 * @new_end_pfn: The new end PFN of the range
3547 *
3548 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3549 * The map is kept at the end of the physical page range that has already been
3550 * registered with add_active_range(). This function allows an arch to shrink
3551 * an existing registered range.
3552 */
3553void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
3554                                                unsigned long new_end_pfn)
3555{
3556        int i;
3557
3558        /* Find the old active region end and shrink */
3559        for_each_active_range_index_in_nid(i, nid)
3560                if (early_node_map[i].end_pfn == old_end_pfn) {
3561                        early_node_map[i].end_pfn = new_end_pfn;
3562                        break;
3563                }
3564}
3565
3566/**
3567 * remove_all_active_ranges - Remove all currently registered regions
3568 *
3569 * During discovery, it may be found that a table like SRAT is invalid
3570 * and an alternative discovery method must be used. This function removes
3571 * all currently registered regions.
3572 */
3573void __init remove_all_active_ranges(void)
3574{
3575        memset(early_node_map, 0, sizeof(early_node_map));
3576        nr_nodemap_entries = 0;
3577#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3578        memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3579        memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3580#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
3581}
3582
3583/* Compare two node_active_region entries */
3584static int __init cmp_node_active_region(const void *a, const void *b)
3585{
3586        struct node_active_region *arange = (struct node_active_region *)a;
3587        struct node_active_region *brange = (struct node_active_region *)b;
3588
3589        /* Done this way to avoid overflows */
3590        if (arange->start_pfn > brange->start_pfn)
3591                return 1;
3592        if (arange->start_pfn < brange->start_pfn)
3593                return -1;
3594
3595        return 0;
3596}
3597
3598/* sort the node_map by start_pfn */
3599static void __init sort_node_map(void)
3600{
3601        sort(early_node_map, (size_t)nr_nodemap_entries,
3602                        sizeof(struct node_active_region),
3603                        cmp_node_active_region, NULL);
3604}
3605
3606/* Find the lowest pfn for a node */
3607unsigned long __init find_min_pfn_for_node(unsigned long nid)
3608{
3609        int i;
3610        unsigned long min_pfn = ULONG_MAX;
3611
3612        /* Assuming a sorted map, the first range found has the starting pfn */
3613        for_each_active_range_index_in_nid(i, nid)
3614                min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3615
3616        if (min_pfn == ULONG_MAX) {
3617                printk(KERN_WARNING
3618                        "Could not find start_pfn for node %lu\n", nid);
3619                return 0;
3620        }
3621
3622        return min_pfn;
3623}
3624
3625/**
3626 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3627 *
3628 * It returns the minimum PFN based on information provided via
3629 * add_active_range().
3630 */
3631unsigned long __init find_min_pfn_with_active_regions(void)
3632{
3633        return find_min_pfn_for_node(MAX_NUMNODES);
3634}
3635
3636/**
3637 * find_max_pfn_with_active_regions - Find the maximum PFN registered
3638 *
3639 * It returns the maximum PFN based on information provided via
3640 * add_active_range().
3641 */
3642unsigned long __init find_max_pfn_with_active_regions(void)
3643{
3644        int i;
3645        unsigned long max_pfn = 0;
3646
3647        for (i = 0; i < nr_nodemap_entries; i++)
3648                max_pfn = max(max_pfn, early_node_map[i].end_pfn);
3649
3650        return max_pfn;
3651}
3652
3653/*
3654 * early_calculate_totalpages()
3655 * Sum pages in active regions for movable zone.
3656 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3657 */
3658static unsigned long __init early_calculate_totalpages(void)
3659{
3660        int i;
3661        unsigned long totalpages = 0;
3662
3663        for (i = 0; i < nr_nodemap_entries; i++) {
3664                unsigned long pages = early_node_map[i].end_pfn -
3665                                                early_node_map[i].start_pfn;
3666                totalpages += pages;
3667                if (pages)
3668                        node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3669        }
3670        return totalpages;
3671}
3672
3673/*
3674 * Find the PFN at which the Movable zone begins in each node. Kernel
3675 * memory is spread evenly between nodes as long as the nodes have enough
3676 * memory. When they don't, some nodes will have more kernelcore than
3677 * others.
3678 */
3679void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3680{
3681        int i, nid;
3682        unsigned long usable_startpfn;
3683        unsigned long kernelcore_node, kernelcore_remaining;
3684        unsigned long totalpages = early_calculate_totalpages();
3685        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3686
3687        /*
3688         * If movablecore was specified, calculate the corresponding
3689         * size of kernelcore so that memory usable for any allocation
3690         * type is evenly spread. If both kernelcore
3691         * and movablecore are specified, then the value of kernelcore
3692         * will be used for required_kernelcore if it's greater than
3693         * what movablecore would have allowed.
3694         */
3695        if (required_movablecore) {
3696                unsigned long corepages;
3697
3698                /*
3699                 * Round-up so that ZONE_MOVABLE is at least as large as what
3700                 * was requested by the user
3701                 */
3702                required_movablecore =
3703                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3704                corepages = totalpages - required_movablecore;
3705
3706                required_kernelcore = max(required_kernelcore, corepages);
3707        }
3708
3709        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3710        if (!required_kernelcore)
3711                return;
3712
3713        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3714        find_usable_zone_for_movable();
3715        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3716
3717restart:
3718        /* Spread kernelcore memory as evenly as possible throughout nodes */
3719        kernelcore_node = required_kernelcore / usable_nodes;
3720        for_each_node_state(nid, N_HIGH_MEMORY) {
3721                /*
3722                 * Recalculate kernelcore_node if the division per node
3723                 * now exceeds what is necessary to satisfy the requested
3724                 * amount of memory for the kernel
3725                 */
3726                if (required_kernelcore < kernelcore_node)
3727                        kernelcore_node = required_kernelcore / usable_nodes;
3728
3729                /*
3730                 * As the map is walked, we track how much memory is usable
3731                 * by the kernel using kernelcore_remaining. When it is
3732                 * 0, the rest of the node is usable by ZONE_MOVABLE
3733                 */
3734                kernelcore_remaining = kernelcore_node;
3735
3736                /* Go through each range of PFNs within this node */
3737                for_each_active_range_index_in_nid(i, nid) {
3738                        unsigned long start_pfn, end_pfn;
3739                        unsigned long size_pages;
3740
3741                        start_pfn = max(early_node_map[i].start_pfn,
3742                                                zone_movable_pfn[nid]);
3743                        end_pfn = early_node_map[i].end_pfn;
3744                        if (start_pfn >= end_pfn)
3745                                continue;
3746
3747                        /* Account for what is only usable for kernelcore */
3748                        if (start_pfn < usable_startpfn) {
3749                                unsigned long kernel_pages;
3750                                kernel_pages = min(end_pfn, usable_startpfn)
3751                                                                - start_pfn;
3752
3753                                kernelcore_remaining -= min(kernel_pages,
3754                                                        kernelcore_remaining);
3755                                required_kernelcore -= min(kernel_pages,
3756                                                        required_kernelcore);
3757
3758                                /* Continue if range is now fully accounted */
3759                                if (end_pfn <= usable_startpfn) {
3760
3761                                        /*
3762                                         * Push zone_movable_pfn to the end so
3763                                         * that if we have to rebalance
3764                                         * kernelcore across nodes, we will
3765                                         * not double account here
3766                                         */
3767                                        zone_movable_pfn[nid] = end_pfn;
3768                                        continue;
3769                                }
3770                                start_pfn = usable_startpfn;
3771                        }
3772
3773                        /*
3774                         * The usable PFN range for ZONE_MOVABLE is from
3775                         * start_pfn->end_pfn. Calculate size_pages as the
3776                         * number of pages used as kernelcore
3777                         */
3778                        size_pages = end_pfn - start_pfn;
3779                        if (size_pages > kernelcore_remaining)
3780                                size_pages = kernelcore_remaining;
3781                        zone_movable_pfn[nid] = start_pfn + size_pages;
3782
3783                        /*
3784                         * Some kernelcore has been accounted for; update
3785                         * the counts and break if the kernelcore for this
3786                         * node has been satisfied
3787                         */
3788                        required_kernelcore -= min(required_kernelcore,
3789                                                                size_pages);
3790                        kernelcore_remaining -= size_pages;
3791                        if (!kernelcore_remaining)
3792                                break;
3793                }
3794        }
3795
3796        /*
3797         * If there is still required_kernelcore, we do another pass with one
3798         * less node in the count. This will push zone_movable_pfn[nid] further
3799         * along on the nodes that still have memory until kernelcore is
3800         * satisfied
3801         */
3802        usable_nodes--;
3803        if (usable_nodes && required_kernelcore > usable_nodes)
3804                goto restart;
3805
3806        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3807        for (nid = 0; nid < MAX_NUMNODES; nid++)
3808                zone_movable_pfn[nid] =
3809                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3810}
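/*
 * Worked example (hypothetical sizes): with kernelcore=2G and two nodes
 * of 4G each, kernelcore_node is 1G per node, so zone_movable_pfn[] ends
 * up roughly 1G above each node's start and the remaining 3G per node
 * becomes ZONE_MOVABLE. Had one node held only 512M, the restart pass
 * would push the shortfall onto the node that still has memory.
 */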
3811
3812/* Any regular memory on that node? */
3813static void check_for_regular_memory(pg_data_t *pgdat)
3814{
3815#ifdef CONFIG_HIGHMEM
3816        enum zone_type zone_type;
3817
3818        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3819                struct zone *zone = &pgdat->node_zones[zone_type];
3820                if (zone->present_pages)
3821                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3822        }
3823#endif
3824}
3825
3826/**
3827 * free_area_init_nodes - Initialise all pg_data_t and zone data
3828 * @max_zone_pfn: an array of max PFNs for each zone
3829 *
3830 * This will call free_area_init_node() for each active node in the system.
3831 * Using the page ranges provided by add_active_range(), the size of each
3832 * zone in each node and its holes is calculated. If the maximum PFNs
3833 * of two adjacent zones match, the higher zone is assumed to be empty.
3834 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3835 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3836 * starts where the previous one ended. For example, ZONE_DMA32 starts
3837 * at arch_max_dma_pfn.
3838 */
3839void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3840{
3841        unsigned long nid;
3842        enum zone_type i;
3843
3844        /* Sort early_node_map as initialisation assumes it is sorted */
3845        sort_node_map();
3846
3847        /* Record where the zone boundaries are */
3848        memset(arch_zone_lowest_possible_pfn, 0,
3849                                sizeof(arch_zone_lowest_possible_pfn));
3850        memset(arch_zone_highest_possible_pfn, 0,
3851                                sizeof(arch_zone_highest_possible_pfn));
3852        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3853        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3854        for (i = 1; i < MAX_NR_ZONES; i++) {
3855                if (i == ZONE_MOVABLE)
3856                        continue;
3857                arch_zone_lowest_possible_pfn[i] =
3858                        arch_zone_highest_possible_pfn[i-1];
3859                arch_zone_highest_possible_pfn[i] =
3860                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3861        }
3862        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3863        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3864
3865        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3866        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3867        find_zone_movable_pfns_for_nodes(zone_movable_pfn);
3868
3869        /* Print out the zone ranges */
3870        printk("Zone PFN ranges:\n");
3871        for (i = 0; i < MAX_NR_ZONES; i++) {
3872                if (i == ZONE_MOVABLE)
3873                        continue;
3874                printk("  %-8s %8lu -> %8lu\n",
3875                                zone_names[i],
3876                                arch_zone_lowest_possible_pfn[i],
3877                                arch_zone_highest_possible_pfn[i]);
3878        }
3879
3880        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3881        printk("Movable zone start PFN for each node\n");
3882        for (i = 0; i < MAX_NUMNODES; i++) {
3883                if (zone_movable_pfn[i])
3884                        printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
3885        }
3886
3887        /* Print out the early_node_map[] */
3888        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
3889        for (i = 0; i < nr_nodemap_entries; i++)
3890                printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
3891                                                early_node_map[i].start_pfn,
3892                                                early_node_map[i].end_pfn);
3893
3894        /* Initialise every node */
3895        setup_nr_node_ids();
3896        for_each_online_node(nid) {
3897                pg_data_t *pgdat = NODE_DATA(nid);
3898                free_area_init_node(nid, pgdat, NULL,
3899                                find_min_pfn_for_node(nid), NULL);
3900
3901                /* Any memory on that node */
3902                if (pgdat->node_present_pages)
3903                        node_set_state(nid, N_HIGH_MEMORY);
3904                check_for_regular_memory(pgdat);
3905        }
3906}
3907
3908static int __init cmdline_parse_core(char *p, unsigned long *core)
3909{
3910        unsigned long long coremem;
3911        if (!p)
3912                return -EINVAL;
3913
3914        coremem = memparse(p, &p);
3915        *core = coremem >> PAGE_SHIFT;
3916
3917        /* Paranoid check that UL is enough for the coremem value */
3918        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3919
3920        return 0;
3921}
3922
3923/*
3924 * kernelcore=size sets the amount of memory to be used for allocations that
3925 * cannot be reclaimed or migrated.
3926 */
3927static int __init cmdline_parse_kernelcore(char *p)
3928{
3929        return cmdline_parse_core(p, &required_kernelcore);
3930}
3931
3932/*
3933 * movablecore=size sets the amount of memory to be used for allocations that
3934 * can be reclaimed or migrated.
3935 */
3936static int __init cmdline_parse_movablecore(char *p)
3937{
3938        return cmdline_parse_core(p, &required_movablecore);
3939}
3940
3941early_param("kernelcore", cmdline_parse_kernelcore);
3942early_param("movablecore", cmdline_parse_movablecore);
3943
3944#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3945
3946/**
3947 * set_dma_reserve - set the specified number of pages reserved in the first zone
3948 * @new_dma_reserve: The number of pages to mark reserved
3949 *
3950 * The per-cpu batchsize and zone watermarks are determined by present_pages.
3951 * In the DMA zone, a significant percentage may be consumed by kernel image
3952 * and other unfreeable allocations which can skew the watermarks badly. This
3953 * function may optionally be used to account for unfreeable pages in the
3954 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
3955 * smaller per-cpu batchsize.
3956 */
3957void __init set_dma_reserve(unsigned long new_dma_reserve)
3958{
3959        dma_reserve = new_dma_reserve;
3960}
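/*
 * Illustrative sketch (the variable name is hypothetical): an arch would
 * typically call this during boot, once it knows how much of the first
 * zone is permanently consumed by the kernel image and bootmem data:
 *
 *        set_dma_reserve(unfreeable_low_pages);
 *
 * so that the watermarks computed later reflect only freeable pages.
 */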
3961
3962#ifndef CONFIG_NEED_MULTIPLE_NODES
3963static bootmem_data_t contig_bootmem_data;
3964struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
3965
3966EXPORT_SYMBOL(contig_page_data);
3967#endif
3968
3969void __init free_area_init(unsigned long *zones_size)
3970{
3971        free_area_init_node(0, NODE_DATA(0), zones_size,
3972                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
3973}
3974
3975static int page_alloc_cpu_notify(struct notifier_block *self,
3976                                 unsigned long action, void *hcpu)
3977{
3978        int cpu = (unsigned long)hcpu;
3979
3980        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
3981                local_irq_disable();
3982                __drain_pages(cpu);
3983                vm_events_fold_cpu(cpu);
3984                local_irq_enable();
3985                refresh_cpu_vm_stats(cpu);
3986        }
3987        return NOTIFY_OK;
3988}
3989
3990void __init page_alloc_init(void)
3991{
3992        hotcpu_notifier(page_alloc_cpu_notify, 0);
3993}
3994
3995/*
3996 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
3997 *      or min_free_kbytes changes.
3998 */
3999static void calculate_totalreserve_pages(void)
4000{
4001        struct pglist_data *pgdat;
4002        unsigned long reserve_pages = 0;
4003        enum zone_type i, j;
4004
4005        for_each_online_pgdat(pgdat) {
4006                for (i = 0; i < MAX_NR_ZONES; i++) {
4007                        struct zone *zone = pgdat->node_zones + i;
4008                        unsigned long max = 0;
4009
4010                        /* Find valid and maximum lowmem_reserve in the zone */
4011                        for (j = i; j < MAX_NR_ZONES; j++) {
4012                                if (zone->lowmem_reserve[j] > max)
4013                                        max = zone->lowmem_reserve[j];
4014                        }
4015
4016                        /* we treat pages_high as reserved pages. */
4017                        max += zone->pages_high;
4018
4019                        if (max > zone->present_pages)
4020                                max = zone->present_pages;
4021                        reserve_pages += max;
4022                }
4023        }
4024        totalreserve_pages = reserve_pages;
4025}
4026
4027/*
4028 * setup_per_zone_lowmem_reserve - called whenever
4029 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4030 *      has a correct pages reserved value, so an adequate number of
4031 *      pages are left in the zone after a successful __alloc_pages().
4032 */
4033static void setup_per_zone_lowmem_reserve(void)
4034{
4035        struct pglist_data *pgdat;
4036        enum zone_type j, idx;
4037
4038        for_each_online_pgdat(pgdat) {
4039                for (j = 0; j < MAX_NR_ZONES; j++) {
4040                        struct zone *zone = pgdat->node_zones + j;
4041                        unsigned long present_pages = zone->present_pages;
4042
4043                        zone->lowmem_reserve[j] = 0;
4044
4045                        idx = j;
4046                        while (idx) {
4047                                struct zone *lower_zone;
4048
4049                                idx--;
4050
4051                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
4052                                        sysctl_lowmem_reserve_ratio[idx] = 1;
4053
4054                                lower_zone = pgdat->node_zones + idx;
4055                                lower_zone->lowmem_reserve[j] = present_pages /
4056                                        sysctl_lowmem_reserve_ratio[idx];
4057                                present_pages += lower_zone->present_pages;
4058                        }
4059                }
4060        }
4061
4062        /* update totalreserve_pages */
4063        calculate_totalreserve_pages();
4064}
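/*
 * Worked example (hypothetical zone sizes): with a 16M DMA zone, a 512M
 * NORMAL zone and sysctl_lowmem_reserve_ratio[DMA] == 256, walking down
 * from j == ZONE_NORMAL sets
 *
 *        DMA->lowmem_reserve[ZONE_NORMAL] = 512M / 256
 *
 * so roughly 2M worth of DMA pages are held back from NORMAL allocations.
 */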
4065
4066/**
4067 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4068 *
4069 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4070 * with respect to min_free_kbytes.
4071 */
4072void setup_per_zone_pages_min(void)
4073{
4074        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4075        unsigned long lowmem_pages = 0;
4076        struct zone *zone;
4077        unsigned long flags;
4078
4079        /* Calculate total number of !ZONE_HIGHMEM pages */
4080        for_each_zone(zone) {
4081                if (!is_highmem(zone))
4082                        lowmem_pages += zone->present_pages;
4083        }
4084
4085        for_each_zone(zone) {
4086                u64 tmp;
4087
4088                spin_lock_irqsave(&zone->lru_lock, flags);
4089                tmp = (u64)pages_min * zone->present_pages;
4090                do_div(tmp, lowmem_pages);
4091                if (is_highmem(zone)) {
4092                        /*
4093                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4094                         * need highmem pages, so cap pages_min to a small
4095                         * value here.
4096                         *
4097                         * The (pages_high-pages_low) and (pages_low-pages_min)
4098                         * deltas control async page reclaim, and so should
4099                         * not be capped for highmem.
4100                         */
4101                        int min_pages;
4102
4103                        min_pages = zone->present_pages / 1024;
4104                        if (min_pages < SWAP_CLUSTER_MAX)
4105                                min_pages = SWAP_CLUSTER_MAX;
4106                        if (min_pages > 128)
4107                                min_pages = 128;
4108                        zone->pages_min = min_pages;
4109                } else {
4110                        /*
4111                         * If it's a lowmem zone, reserve a number of pages
4112                         * proportionate to the zone's size.
4113                         */
4114                        zone->pages_min = tmp;
4115                }
4116
4117                zone->pages_low   = zone->pages_min + (tmp >> 2);
4118                zone->pages_high  = zone->pages_min + (tmp >> 1);
4119                setup_zone_migrate_reserve(zone);
4120                spin_unlock_irqrestore(&zone->lru_lock, flags);
4121        }
4122
4123        /* update totalreserve_pages */
4124        calculate_totalreserve_pages();
4125}
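/*
 * Worked example (hypothetical values, 4K pages): with min_free_kbytes ==
 * 4096 and a single lowmem zone of 262144 present pages (1G), pages_min ==
 * 1024 pages, pages_low == 1280 and pages_high == 1536. A highmem zone
 * would instead have pages_min clamped to between SWAP_CLUSTER_MAX and
 * 128 pages.
 */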
4126
4127/*
4128 * Initialise min_free_kbytes.
4129 *
4130 * For small machines we want it small (128k min).  For large machines
4131 * we want it large (64MB max).  But it is not linear, because network
4132 * bandwidth does not increase linearly with machine size.  We use
4133 *
4134 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4135 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
4136 *
4137 * which yields
4138 *
4139 * 16MB:        512k
4140 * 32MB:        724k
4141 * 64MB:        1024k
4142 * 128MB:       1448k
4143 * 256MB:       2048k
4144 * 512MB:       2896k
4145 * 1024MB:      4096k
4146 * 2048MB:      5792k
4147 * 4096MB:      8192k
4148 * 8192MB:      11584k
4149 * 16384MB:     16384k
4150 */
4151static int __init init_per_zone_pages_min(void)
4152{
4153        unsigned long lowmem_kbytes;
4154
4155        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4156
4157        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4158        if (min_free_kbytes < 128)
4159                min_free_kbytes = 128;
4160        if (min_free_kbytes > 65536)
4161                min_free_kbytes = 65536;
4162        setup_per_zone_pages_min();
4163        setup_per_zone_lowmem_reserve();
4164        return 0;
4165}
4166module_init(init_per_zone_pages_min)
4167
4168/*
4169 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
4170 *      that we can call two helper functions whenever min_free_kbytes
4171 *      changes.
4172 */
4173int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
4174        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4175{
4176        proc_dointvec(table, write, file, buffer, length, ppos);
4177        if (write)
4178                setup_per_zone_pages_min();
4179        return 0;
4180}
4181
4182#ifdef CONFIG_NUMA
4183int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4184        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4185{
4186        struct zone *zone;
4187        int rc;
4188
4189        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4190        if (rc)
4191                return rc;
4192
4193        for_each_zone(zone)
4194                zone->min_unmapped_pages = (zone->present_pages *
4195                                sysctl_min_unmapped_ratio) / 100;
4196        return 0;
4197}
4198
4199int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4200        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4201{
4202        struct zone *zone;
4203        int rc;
4204
4205        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4206        if (rc)
4207                return rc;
4208
4209        for_each_zone(zone)
4210                zone->min_slab_pages = (zone->present_pages *
4211                                sysctl_min_slab_ratio) / 100;
4212        return 0;
4213}
4214#endif
4215
4216/*
4217 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4218 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
4219 *      whenever sysctl_lowmem_reserve_ratio changes.
4220 *
4221 * The reserve ratio has no relation to the pages_min
4222 * watermarks. The lowmem reserve ratio only makes sense
4223 * as a function of the boot-time zone sizes.
4224 */
4225int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4226        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4227{
4228        proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4229        setup_per_zone_lowmem_reserve();
4230        return 0;
4231}
4232
4233/*
4234 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
4235 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
4236 * pagelist can have before it gets flushed back to the buddy allocator.
4237 */
4238
4239int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4240        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4241{
4242        struct zone *zone;
4243        unsigned int cpu;
4244        int ret;
4245
4246        ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4247        if (!write || (ret == -EINVAL))
4248                return ret;
4249        for_each_zone(zone) {
4250                for_each_online_cpu(cpu) {
4251                        unsigned long  high;
4252                        high = zone->present_pages / percpu_pagelist_fraction;
4253                        setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4254                }
4255        }
4256        return 0;
4257}
4258
4259int hashdist = HASHDIST_DEFAULT;
4260
4261#ifdef CONFIG_NUMA
4262static int __init set_hashdist(char *str)
4263{
4264        if (!str)
4265                return 0;
4266        hashdist = simple_strtoul(str, &str, 0);
4267        return 1;
4268}
4269__setup("hashdist=", set_hashdist);
4270#endif
4271
4272/*
4273 * allocate a large system hash table from bootmem
4274 * - it is assumed that the hash table must contain an exact power-of-2
4275 *   quantity of entries
4276 * - limit is the number of hash buckets, not the total allocation size
4277 */
4278void *__init alloc_large_system_hash(const char *tablename,
4279                                     unsigned long bucketsize,
4280                                     unsigned long numentries,
4281                                     int scale,
4282                                     int flags,
4283                                     unsigned int *_hash_shift,
4284                                     unsigned int *_hash_mask,
4285                                     unsigned long limit)
4286{
4287        unsigned long long max = limit;
4288        unsigned long log2qty, size;
4289        void *table = NULL;
4290
4291        /* allow the kernel cmdline to have a say */
4292        if (!numentries) {
4293                /* round applicable memory size up to nearest megabyte */
4294                numentries = nr_kernel_pages;
4295                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4296                numentries >>= 20 - PAGE_SHIFT;
4297                numentries <<= 20 - PAGE_SHIFT;
4298
4299                /* limit to 1 bucket per 2^scale bytes of low memory */
4300                if (scale > PAGE_SHIFT)
4301                        numentries >>= (scale - PAGE_SHIFT);
4302                else
4303                        numentries <<= (PAGE_SHIFT - scale);
4304
4305                /* Make sure we've got at least a 0-order allocation. */
4306                if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4307                        numentries = PAGE_SIZE / bucketsize;
4308        }
4309        numentries = roundup_pow_of_two(numentries);
4310
4311        /* limit allocation size to 1/16 total memory by default */
4312        if (max == 0) {
4313                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4314                do_div(max, bucketsize);
4315        }
4316
4317        if (numentries > max)
4318                numentries = max;
4319
4320        log2qty = ilog2(numentries);
4321
4322        do {
4323                size = bucketsize << log2qty;
4324                if (flags & HASH_EARLY)
4325                        table = alloc_bootmem(size);
4326                else if (hashdist)
4327                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4328                else {
4329                        unsigned long order;
4330                        for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
4331                                ;
4332                        table = (void*) __get_free_pages(GFP_ATOMIC, order);
4333                        /*
4334                         * If bucketsize is not a power-of-two, we may free
4335                         * some pages at the end of hash table.
4336                         */
4337                        if (table) {
4338                                unsigned long alloc_end = (unsigned long)table +
4339                                                (PAGE_SIZE << order);
4340                                unsigned long used = (unsigned long)table +
4341                                                PAGE_ALIGN(size);
4342                                split_page(virt_to_page(table), order);
4343                                while (used < alloc_end) {
4344                                        free_page(used);
4345                                        used += PAGE_SIZE;
4346                                }
4347                        }
4348                }
4349        } while (!table && size > PAGE_SIZE && --log2qty);
4350
4351        if (!table)
4352                panic("Failed to allocate %s hash table\n", tablename);
4353
4354        printk(KERN_INFO "%s hash table entries: %u (order: %d, %lu bytes)\n",
4355               tablename,
4356               (1U << log2qty),
4357               ilog2(size) - PAGE_SHIFT,
4358               size);
4359
4360        if (_hash_shift)
4361                *_hash_shift = log2qty;
4362        if (_hash_mask)
4363                *_hash_mask = (1 << log2qty) - 1;
4364
4365        return table;
4366}
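/*
 * For reference, a typical boot-time caller (a sketch modelled on the
 * dentry-cache setup in fs/dcache.c, where dhash_entries, d_hash_shift
 * and d_hash_mask are defined):
 *
 *      dentry_hashtable =
 *              alloc_large_system_hash("Dentry cache",
 *                                      sizeof(struct hlist_head),
 *                                      dhash_entries,   [0 unless set on
 *                                                        the command line]
 *                                      13,              [1 bucket per 8KB]
 *                                      HASH_EARLY,      [use bootmem]
 *                                      &d_hash_shift,
 *                                      &d_hash_mask,
 *                                      0);              [no explicit limit]
 *
 * The returned mask is then used to index the table, e.g.
 * dentry_hashtable[hash & d_hash_mask].
 */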
4367
4368#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
4369struct page *pfn_to_page(unsigned long pfn)
4370{
4371        return __pfn_to_page(pfn);
4372}
4373unsigned long page_to_pfn(struct page *page)
4374{
4375        return __page_to_pfn(page);
4376}
4377EXPORT_SYMBOL(pfn_to_page);
4378EXPORT_SYMBOL(page_to_pfn);
4379#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
4380
4381/* Return a pointer to the bitmap storing bits affecting a block of pages */
4382static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4383                                                        unsigned long pfn)
4384{
4385#ifdef CONFIG_SPARSEMEM
4386        return __pfn_to_section(pfn)->pageblock_flags;
4387#else
4388        return zone->pageblock_flags;
4389#endif /* CONFIG_SPARSEMEM */
4390}
4391
4392static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4393{
4394#ifdef CONFIG_SPARSEMEM
4395        pfn &= (PAGES_PER_SECTION-1);
4396        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4397#else
4398        pfn = pfn - zone->zone_start_pfn;
4399        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4400#endif /* CONFIG_SPARSEMEM */
4401}
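/*
 * Worked example with illustrative numbers: assuming pageblock_order = 10
 * and NR_PAGEBLOCK_BITS = 3, a page at offset 5000 within its zone (or,
 * under SPARSEMEM, within its section) lies in pageblock 5000 >> 10 = 4,
 * so its flags occupy bits 12..14 of the bitmap returned by
 * get_pageblock_bitmap().
 */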
4402
4403/**
4404 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4405 * @page: The page within the block of interest
4406 * @start_bitidx: The first bit of interest to retrieve
4407 * @end_bitidx: The last bit of interest
4408 * returns pageblock_bits flags
4409 */
4410unsigned long get_pageblock_flags_group(struct page *page,
4411                                        int start_bitidx, int end_bitidx)
4412{
4413        struct zone *zone;
4414        unsigned long *bitmap;
4415        unsigned long pfn, bitidx;
4416        unsigned long flags = 0;
4417        unsigned long value = 1;
4418
4419        zone = page_zone(page);
4420        pfn = page_to_pfn(page);
4421        bitmap = get_pageblock_bitmap(zone, pfn);
4422        bitidx = pfn_to_bitidx(zone, pfn);
4423
4424        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4425                if (test_bit(bitidx + start_bitidx, bitmap))
4426                        flags |= value;
4427
4428        return flags;
4429}
4430
4431/**
4432 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4433 * @page: The page within the block of interest
4434 * @start_bitidx: The first bit of interest
4435 * @end_bitidx: The last bit of interest
4436 * @flags: The flags to set
4437 */
4438void set_pageblock_flags_group(struct page *page, unsigned long flags,
4439                                        int start_bitidx, int end_bitidx)
4440{
4441        struct zone *zone;
4442        unsigned long *bitmap;
4443        unsigned long pfn, bitidx;
4444        unsigned long value = 1;
4445
4446        zone = page_zone(page);
4447        pfn = page_to_pfn(page);
4448        bitmap = get_pageblock_bitmap(zone, pfn);
4449        bitidx = pfn_to_bitidx(zone, pfn);
4450
4451        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4452                if (flags & value)
4453                        __set_bit(bitidx + start_bitidx, bitmap);
4454                else
4455                        __clear_bit(bitidx + start_bitidx, bitmap);
4456}
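/*
 * These two functions back the migratetype accessors used throughout the
 * allocator; a sketch of those accessors (the actual definitions live in
 * include/linux/mmzone.h and near the top of this file):
 *
 *      static inline int get_pageblock_migratetype(struct page *page)
 *      {
 *              return get_pageblock_flags_group(page, PB_migrate,
 *                                               PB_migrate_end);
 *      }
 *
 *      static void set_pageblock_migratetype(struct page *page,
 *                                            int migratetype)
 *      {
 *              set_pageblock_flags_group(page, (unsigned long)migratetype,
 *                                              PB_migrate, PB_migrate_end);
 *      }
 */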
4457
4458/*
4459 * Helper functions; see mm/page_isolation.c as well.
4460 * These set/clear a pageblock's migrate type to/from MIGRATE_ISOLATE.
4461 * The page allocator never allocates memory from an ISOLATE pageblock.
4462 */
4463
4464int set_migratetype_isolate(struct page *page)
4465{
4466        struct zone *zone;
4467        unsigned long flags;
4468        int ret = -EBUSY;
4469
4470        zone = page_zone(page);
4471        spin_lock_irqsave(&zone->lock, flags);
4472        /*
4473         * In the future, more migrate types may become isolation targets.
4474         */
4475        if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4476                goto out;
4477        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4478        move_freepages_block(zone, page, MIGRATE_ISOLATE);
4479        ret = 0;
4480out:
4481        spin_unlock_irqrestore(&zone->lock, flags);
4482        if (!ret)
4483                drain_all_local_pages();
4484        return ret;
4485}
4486
4487void unset_migratetype_isolate(struct page *page)
4488{
4489        struct zone *zone;
4490        unsigned long flags;
4491        zone = page_zone(page);
4492        spin_lock_irqsave(&zone->lock, flags);
4493        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4494                goto out;
4495        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4496        move_freepages_block(zone, page, MIGRATE_MOVABLE);
4497out:
4498        spin_unlock_irqrestore(&zone->lock, flags);
4499}
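/*
 * Typical usage (a sketch modelled on start_isolate_page_range() in
 * mm/page_isolation.c; the real code also skips invalid pfns):
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
 *              if (set_migratetype_isolate(pfn_to_page(pfn))) {
 *                      undo_pfn = pfn;
 *                      goto undo;
 *              }
 *      return 0;
 * undo:
 *      for (pfn = start_pfn; pfn < undo_pfn; pfn += pageblock_nr_pages)
 *              unset_migratetype_isolate(pfn_to_page(pfn));
 *      return -EBUSY;
 */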
4500
4501#ifdef CONFIG_MEMORY_HOTREMOVE
4502/*
4503 * All pages in the range must be isolated before calling this.
4504 */
4505void
4506__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4507{
4508        struct page *page;
4509        struct zone *zone;
4510        int order, i;
4511        unsigned long pfn;
4512        unsigned long flags;
4513        /* find the first valid pfn */
4514        for (pfn = start_pfn; pfn < end_pfn; pfn++)
4515                if (pfn_valid(pfn))
4516                        break;
4517        if (pfn == end_pfn)
4518                return;
4519        zone = page_zone(pfn_to_page(pfn));
4520        spin_lock_irqsave(&zone->lock, flags);
4521        pfn = start_pfn;
4522        while (pfn < end_pfn) {
4523                if (!pfn_valid(pfn)) {
4524                        pfn++;
4525                        continue;
4526                }
4527                page = pfn_to_page(pfn);
4528                BUG_ON(page_count(page));
4529                BUG_ON(!PageBuddy(page));
4530                order = page_order(page);
4531#ifdef CONFIG_DEBUG_VM
4532                printk(KERN_INFO "remove from free list %lx %d %lx\n",
4533                       pfn, 1 << order, end_pfn);
4534#endif
4535                list_del(&page->lru);
4536                rmv_page_order(page);
4537                zone->free_area[order].nr_free--;
4538                __mod_zone_page_state(zone, NR_FREE_PAGES,
4539                                      - (1UL << order));
4540                for (i = 0; i < (1 << order); i++)
4541                        SetPageReserved((page+i));
4542                pfn += (1 << order);
4543        }
4544        spin_unlock_irqrestore(&zone->lock, flags);
4545}
4546#endif
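/*
 * For context, a heavily simplified sketch of the offline path in
 * mm/memory_hotplug.c (the real code counts pages, retries and handles
 * failure):
 *
 *      if (start_isolate_page_range(start_pfn, end_pfn))
 *              return -EBUSY;
 *      ... migrate or reclaim every in-use page in the range ...
 *      offline_isolated_pages(start_pfn, end_pfn);
 *              (which ends up calling __offline_isolated_pages() above)
 */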
4547