linux/mm/vmscan.c
   1/*
   2 *  linux/mm/vmscan.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *
   6 *  Swap reorganised 29.12.95, Stephen Tweedie.
   7 *  kswapd added: 7.1.96  sct
   8 *  Removed kswapd_ctl limits, and swap out as many pages as needed
   9 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11 *  Multiqueue VM started 5.8.00, Rik van Riel.
  12 */
  13
  14#include <linux/mm.h>
  15#include <linux/module.h>
  16#include <linux/gfp.h>
  17#include <linux/kernel_stat.h>
  18#include <linux/swap.h>
  19#include <linux/pagemap.h>
  20#include <linux/init.h>
  21#include <linux/highmem.h>
  22#include <linux/vmstat.h>
  23#include <linux/file.h>
  24#include <linux/writeback.h>
  25#include <linux/blkdev.h>
  26#include <linux/buffer_head.h>  /* for try_to_release_page(),
  27                                        buffer_heads_over_limit */
  28#include <linux/mm_inline.h>
  29#include <linux/pagevec.h>
  30#include <linux/backing-dev.h>
  31#include <linux/rmap.h>
  32#include <linux/topology.h>
  33#include <linux/cpu.h>
  34#include <linux/cpuset.h>
  35#include <linux/compaction.h>
  36#include <linux/notifier.h>
  37#include <linux/rwsem.h>
  38#include <linux/delay.h>
  39#include <linux/kthread.h>
  40#include <linux/freezer.h>
  41#include <linux/memcontrol.h>
  42#include <linux/delayacct.h>
  43#include <linux/sysctl.h>
  44
  45#include <asm/tlbflush.h>
  46#include <asm/div64.h>
  47
  48#include <linux/swapops.h>
  49
  50#include "internal.h"
  51
  52#define CREATE_TRACE_POINTS
  53#include <trace/events/vmscan.h>
  54
  55/*
  56 * reclaim_mode determines how the inactive list is shrunk
  57 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
  58 * RECLAIM_MODE_ASYNC:  Do not block
  59 * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
  60 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
  61 *                      page from the LRU and reclaim all pages within a
  62 *                      naturally aligned range
  63 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
  64 *                      order-0 pages and then compact the zone
  65 */
  66typedef unsigned __bitwise__ reclaim_mode_t;
  67#define RECLAIM_MODE_SINGLE             ((__force reclaim_mode_t)0x01u)
  68#define RECLAIM_MODE_ASYNC              ((__force reclaim_mode_t)0x02u)
  69#define RECLAIM_MODE_SYNC               ((__force reclaim_mode_t)0x04u)
  70#define RECLAIM_MODE_LUMPYRECLAIM       ((__force reclaim_mode_t)0x08u)
  71#define RECLAIM_MODE_COMPACTION         ((__force reclaim_mode_t)0x10u)
  72
  73struct scan_control {
  74        /* Incremented by the number of inactive pages that were scanned */
  75        unsigned long nr_scanned;
  76
  77        /* Number of pages freed so far during a call to shrink_zones() */
  78        unsigned long nr_reclaimed;
  79
  80        /* How many pages shrink_list() should reclaim */
  81        unsigned long nr_to_reclaim;
  82
  83        unsigned long hibernation_mode;
  84
  85        /* This context's GFP mask */
  86        gfp_t gfp_mask;
  87
  88        int may_writepage;
  89
  90        /* Can mapped pages be reclaimed? */
  91        int may_unmap;
  92
  93        /* Can pages be swapped as part of reclaim? */
  94        int may_swap;
  95
  96        int swappiness;
  97
  98        int order;
  99
 100        /*
  101         * Intend to reclaim enough contiguous memory rather than just
  102         * enough memory, i.e. the mode used for high-order allocations.
 103         */
 104        reclaim_mode_t reclaim_mode;
 105
 106        /* Which cgroup do we reclaim from */
 107        struct mem_cgroup *mem_cgroup;
 108
 109        /*
 110         * Nodemask of nodes allowed by the caller. If NULL, all nodes
 111         * are scanned.
 112         */
 113        nodemask_t      *nodemask;
 114};
 115
 116#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 117
 118#ifdef ARCH_HAS_PREFETCH
 119#define prefetch_prev_lru_page(_page, _base, _field)                    \
 120        do {                                                            \
 121                if ((_page)->lru.prev != _base) {                       \
 122                        struct page *prev;                              \
 123                                                                        \
 124                        prev = lru_to_page(&(_page->lru));              \
 125                        prefetch(&prev->_field);                        \
 126                }                                                       \
 127        } while (0)
 128#else
 129#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
 130#endif
 131
 132#ifdef ARCH_HAS_PREFETCHW
 133#define prefetchw_prev_lru_page(_page, _base, _field)                   \
 134        do {                                                            \
 135                if ((_page)->lru.prev != _base) {                       \
 136                        struct page *prev;                              \
 137                                                                        \
 138                        prev = lru_to_page(&(_page->lru));              \
 139                        prefetchw(&prev->_field);                       \
 140                }                                                       \
 141        } while (0)
 142#else
 143#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
 144#endif
 145
 146/*
 147 * From 0 .. 100.  Higher means more swappy.
 148 */
 149int vm_swappiness = 60;
 150long vm_total_pages;    /* The total number of pages which the VM controls */
 151
 152static LIST_HEAD(shrinker_list);
 153static DECLARE_RWSEM(shrinker_rwsem);
 154
 155#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 156#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
 157#else
 158#define scanning_global_lru(sc) (1)
 159#endif
 160
 161static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
 162                                                  struct scan_control *sc)
 163{
 164        if (!scanning_global_lru(sc))
 165                return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
 166
 167        return &zone->reclaim_stat;
 168}
 169
 170static unsigned long zone_nr_lru_pages(struct zone *zone,
 171                                struct scan_control *sc, enum lru_list lru)
 172{
 173        if (!scanning_global_lru(sc))
 174                return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
 175
 176        return zone_page_state(zone, NR_LRU_BASE + lru);
 177}
 178
 179
 180/*
 181 * Add a shrinker callback to be called from the vm
 182 */
 183void register_shrinker(struct shrinker *shrinker)
 184{
 185        shrinker->nr = 0;
 186        down_write(&shrinker_rwsem);
 187        list_add_tail(&shrinker->list, &shrinker_list);
 188        up_write(&shrinker_rwsem);
 189}
 190EXPORT_SYMBOL(register_shrinker);
 191
 192/*
 193 * Remove one
 194 */
 195void unregister_shrinker(struct shrinker *shrinker)
 196{
 197        down_write(&shrinker_rwsem);
 198        list_del(&shrinker->list);
 199        up_write(&shrinker_rwsem);
 200}
 201EXPORT_SYMBOL(unregister_shrinker);
 202
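     /*
      * A minimal sketch of a shrinker client, for illustration only; the
      * example_cache_* names are hypothetical and not part of this file.
      * It follows the contract shrink_slab() below relies on: called with
      * nr_to_scan == 0 the callback reports how many objects are freeable,
      * otherwise it frees up to nr_to_scan objects and returns how many
      * remain (or -1 to abort):
      *
      *     static int example_shrink(struct shrinker *s, int nr_to_scan,
      *                               gfp_t gfp_mask)
      *     {
      *             if (nr_to_scan)
      *                     example_cache_trim(nr_to_scan, gfp_mask);
      *             return example_cache_count();
      *     }
      *
      *     static struct shrinker example_shrinker = {
      *             .shrink = example_shrink,
      *             .seeks  = DEFAULT_SEEKS,
      *     };
      *
      * registered with register_shrinker(&example_shrinker) and removed
      * again with unregister_shrinker(&example_shrinker).
      */
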
 203#define SHRINK_BATCH 128
 204/*
 205 * Call the shrink functions to age shrinkable caches
 206 *
 207 * Here we assume it costs one seek to replace a lru page and that it also
 208 * takes a seek to recreate a cache object.  With this in mind we age equal
 209 * percentages of the lru and ageable caches.  This should balance the seeks
 210 * generated by these structures.
 211 *
  212 * If the vm encountered mapped pages on the LRU it increases the pressure on
 213 * slab to avoid swapping.
 214 *
 215 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 216 *
 217 * `lru_pages' represents the number of on-LRU pages in all the zones which
 218 * are eligible for the caller's allocation attempt.  It is used for balancing
 219 * slab reclaim versus page reclaim.
 220 *
 221 * Returns the number of slab objects which we shrunk.
 222 */
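     /*
      * For illustration with made-up numbers: scanned = 1024, a shrinker
      * with seeks = 2, max_pass = 10000 freeable objects and
      * lru_pages = 100000 give
      *
      *     delta = (4 * 1024 / 2) * 10000 / (100000 + 1) ~= 204
      *
      * objects of pressure added to shrinker->nr, which is then worked
      * off in SHRINK_BATCH (128) sized chunks below.
      */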
 223unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 224                        unsigned long lru_pages)
 225{
 226        struct shrinker *shrinker;
 227        unsigned long ret = 0;
 228
 229        if (scanned == 0)
 230                scanned = SWAP_CLUSTER_MAX;
 231
 232        if (!down_read_trylock(&shrinker_rwsem))
 233                return 1;       /* Assume we'll be able to shrink next time */
 234
 235        list_for_each_entry(shrinker, &shrinker_list, list) {
 236                unsigned long long delta;
 237                unsigned long total_scan;
 238                unsigned long max_pass;
 239
 240                max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
 241                delta = (4 * scanned) / shrinker->seeks;
 242                delta *= max_pass;
 243                do_div(delta, lru_pages + 1);
 244                shrinker->nr += delta;
 245                if (shrinker->nr < 0) {
 246                        printk(KERN_ERR "shrink_slab: %pF negative objects to "
 247                               "delete nr=%ld\n",
 248                               shrinker->shrink, shrinker->nr);
 249                        shrinker->nr = max_pass;
 250                }
 251
 252                /*
 253                 * Avoid risking looping forever due to too large nr value:
  254         * never try to free more than twice the estimated number of
 255                 * freeable entries.
 256                 */
 257                if (shrinker->nr > max_pass * 2)
 258                        shrinker->nr = max_pass * 2;
 259
 260                total_scan = shrinker->nr;
 261                shrinker->nr = 0;
 262
 263                while (total_scan >= SHRINK_BATCH) {
 264                        long this_scan = SHRINK_BATCH;
 265                        int shrink_ret;
 266                        int nr_before;
 267
 268                        nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
 269                        shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
 270                                                                gfp_mask);
 271                        if (shrink_ret == -1)
 272                                break;
 273                        if (shrink_ret < nr_before)
 274                                ret += nr_before - shrink_ret;
 275                        count_vm_events(SLABS_SCANNED, this_scan);
 276                        total_scan -= this_scan;
 277
 278                        cond_resched();
 279                }
 280
 281                shrinker->nr += total_scan;
 282        }
 283        up_read(&shrinker_rwsem);
 284        return ret;
 285}
 286
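     /*
      * For example, assuming PAGE_ALLOC_COSTLY_ORDER == 3 and
      * DEF_PRIORITY == 12: an order-9 (hugepage) request always gets the
      * lumpy or compaction mode together with the requested sync mode, an
      * order-2 request only does so once priority has dropped below
      * DEF_PRIORITY - 2, and everything else (including all order-0
      * requests) falls back to RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC.
      */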
 287static void set_reclaim_mode(int priority, struct scan_control *sc,
 288                                   bool sync)
 289{
 290        reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
 291
 292        /*
 293         * Initially assume we are entering either lumpy reclaim or
  294         * reclaim/compaction. Depending on the order, we will either set the
 295         * sync mode or just reclaim order-0 pages later.
 296         */
 297        if (COMPACTION_BUILD)
 298                sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
 299        else
 300                sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
 301
 302        /*
 303         * Avoid using lumpy reclaim or reclaim/compaction if possible by
  304         * restricting when it is set to either costly allocations or when
 305         * under memory pressure
 306         */
 307        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
 308                sc->reclaim_mode |= syncmode;
 309        else if (sc->order && priority < DEF_PRIORITY - 2)
 310                sc->reclaim_mode |= syncmode;
 311        else
 312                sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 313}
 314
 315static void reset_reclaim_mode(struct scan_control *sc)
 316{
 317        sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 318}
 319
 320static inline int is_page_cache_freeable(struct page *page)
 321{
 322        /*
 323         * A freeable page cache page is referenced only by the caller
 324         * that isolated the page, the page cache radix tree and
 325         * optional buffer heads at page->private.
 326         */
 327        return page_count(page) - page_has_private(page) == 2;
 328}
 329
 330static int may_write_to_queue(struct backing_dev_info *bdi,
 331                              struct scan_control *sc)
 332{
 333        if (current->flags & PF_SWAPWRITE)
 334                return 1;
 335        if (!bdi_write_congested(bdi))
 336                return 1;
 337        if (bdi == current->backing_dev_info)
 338                return 1;
 339
  340        /* lumpy reclaim for hugepages often needs a lot of writeback */
 341        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
 342                return 1;
 343        return 0;
 344}
 345
 346/*
 347 * We detected a synchronous write error writing a page out.  Probably
 348 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 349 * fsync(), msync() or close().
 350 *
 351 * The tricky part is that after writepage we cannot touch the mapping: nothing
 352 * prevents it from being freed up.  But we have a ref on the page and once
 353 * that page is locked, the mapping is pinned.
 354 *
 355 * We're allowed to run sleeping lock_page() here because we know the caller has
 356 * __GFP_FS.
 357 */
 358static void handle_write_error(struct address_space *mapping,
 359                                struct page *page, int error)
 360{
 361        lock_page_nosync(page);
 362        if (page_mapping(page) == mapping)
 363                mapping_set_error(mapping, error);
 364        unlock_page(page);
 365}
 366
 367/* possible outcome of pageout() */
 368typedef enum {
 369        /* failed to write page out, page is locked */
 370        PAGE_KEEP,
 371        /* move page to the active list, page is locked */
 372        PAGE_ACTIVATE,
 373        /* page has been sent to the disk successfully, page is unlocked */
 374        PAGE_SUCCESS,
 375        /* page is clean and locked */
 376        PAGE_CLEAN,
 377} pageout_t;
 378
 379/*
 380 * pageout is called by shrink_page_list() for each dirty page.
 381 * Calls ->writepage().
 382 */
 383static pageout_t pageout(struct page *page, struct address_space *mapping,
 384                         struct scan_control *sc)
 385{
 386        /*
 387         * If the page is dirty, only perform writeback if that write
  388         * will be non-blocking, to prevent this allocation from being
  389         * stalled by pagecache activity.  But note that there may be
 390         * stalls if we need to run get_block().  We could test
 391         * PagePrivate for that.
 392         *
 393         * If this process is currently in __generic_file_aio_write() against
 394         * this page's queue, we can perform writeback even if that
 395         * will block.
 396         *
 397         * If the page is swapcache, write it back even if that would
 398         * block, for some throttling. This happens by accident, because
 399         * swap_backing_dev_info is bust: it doesn't reflect the
 400         * congestion state of the swapdevs.  Easy to fix, if needed.
 401         */
 402        if (!is_page_cache_freeable(page))
 403                return PAGE_KEEP;
 404        if (!mapping) {
 405                /*
 406                 * Some data journaling orphaned pages can have
 407                 * page->mapping == NULL while being dirty with clean buffers.
 408                 */
 409                if (page_has_private(page)) {
 410                        if (try_to_free_buffers(page)) {
 411                                ClearPageDirty(page);
 412                                printk("%s: orphaned page\n", __func__);
 413                                return PAGE_CLEAN;
 414                        }
 415                }
 416                return PAGE_KEEP;
 417        }
 418        if (mapping->a_ops->writepage == NULL)
 419                return PAGE_ACTIVATE;
 420        if (!may_write_to_queue(mapping->backing_dev_info, sc))
 421                return PAGE_KEEP;
 422
 423        if (clear_page_dirty_for_io(page)) {
 424                int res;
 425                struct writeback_control wbc = {
 426                        .sync_mode = WB_SYNC_NONE,
 427                        .nr_to_write = SWAP_CLUSTER_MAX,
 428                        .range_start = 0,
 429                        .range_end = LLONG_MAX,
 430                        .for_reclaim = 1,
 431                };
 432
 433                SetPageReclaim(page);
 434                res = mapping->a_ops->writepage(page, &wbc);
 435                if (res < 0)
 436                        handle_write_error(mapping, page, res);
 437                if (res == AOP_WRITEPAGE_ACTIVATE) {
 438                        ClearPageReclaim(page);
 439                        return PAGE_ACTIVATE;
 440                }
 441
 442                /*
 443                 * Wait on writeback if requested to. This happens when
 444                 * direct reclaiming a large contiguous area and the
 445                 * first attempt to free a range of pages fails.
 446                 */
 447                if (PageWriteback(page) &&
 448                    (sc->reclaim_mode & RECLAIM_MODE_SYNC))
 449                        wait_on_page_writeback(page);
 450
 451                if (!PageWriteback(page)) {
 452                        /* synchronous write or broken a_ops? */
 453                        ClearPageReclaim(page);
 454                }
 455                trace_mm_vmscan_writepage(page,
 456                        trace_reclaim_flags(page, sc->reclaim_mode));
 457                inc_zone_page_state(page, NR_VMSCAN_WRITE);
 458                return PAGE_SUCCESS;
 459        }
 460
 461        return PAGE_CLEAN;
 462}
 463
 464/*
 465 * Same as remove_mapping, but if the page is removed from the mapping, it
 466 * gets returned with a refcount of 0.
 467 */
 468static int __remove_mapping(struct address_space *mapping, struct page *page)
 469{
 470        BUG_ON(!PageLocked(page));
 471        BUG_ON(mapping != page_mapping(page));
 472
 473        spin_lock_irq(&mapping->tree_lock);
 474        /*
  475         * The non-racy check for a busy page.
 476         *
 477         * Must be careful with the order of the tests. When someone has
 478         * a ref to the page, it may be possible that they dirty it then
 479         * drop the reference. So if PageDirty is tested before page_count
 480         * here, then the following race may occur:
 481         *
 482         * get_user_pages(&page);
 483         * [user mapping goes away]
 484         * write_to(page);
 485         *                              !PageDirty(page)    [good]
 486         * SetPageDirty(page);
 487         * put_page(page);
 488         *                              !page_count(page)   [good, discard it]
 489         *
 490         * [oops, our write_to data is lost]
 491         *
 492         * Reversing the order of the tests ensures such a situation cannot
 493         * escape unnoticed. The smp_rmb is needed to ensure the page->flags
 494         * load is not satisfied before that of page->_count.
 495         *
 496         * Note that if SetPageDirty is always performed via set_page_dirty,
 497         * and thus under tree_lock, then this ordering is not required.
 498         */
 499        if (!page_freeze_refs(page, 2))
 500                goto cannot_free;
 501        /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
 502        if (unlikely(PageDirty(page))) {
 503                page_unfreeze_refs(page, 2);
 504                goto cannot_free;
 505        }
 506
 507        if (PageSwapCache(page)) {
 508                swp_entry_t swap = { .val = page_private(page) };
 509                __delete_from_swap_cache(page);
 510                spin_unlock_irq(&mapping->tree_lock);
 511                swapcache_free(swap, page);
 512        } else {
 513                void (*freepage)(struct page *);
 514
 515                freepage = mapping->a_ops->freepage;
 516
 517                __remove_from_page_cache(page);
 518                spin_unlock_irq(&mapping->tree_lock);
 519                mem_cgroup_uncharge_cache_page(page);
 520
 521                if (freepage != NULL)
 522                        freepage(page);
 523        }
 524
 525        return 1;
 526
 527cannot_free:
 528        spin_unlock_irq(&mapping->tree_lock);
 529        return 0;
 530}
 531
 532/*
 533 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 534 * someone else has a ref on the page, abort and return 0.  If it was
 535 * successfully detached, return 1.  Assumes the caller has a single ref on
 536 * this page.
 537 */
 538int remove_mapping(struct address_space *mapping, struct page *page)
 539{
 540        if (__remove_mapping(mapping, page)) {
 541                /*
 542                 * Unfreezing the refcount with 1 rather than 2 effectively
 543                 * drops the pagecache ref for us without requiring another
 544                 * atomic operation.
 545                 */
 546                page_unfreeze_refs(page, 1);
 547                return 1;
 548        }
 549        return 0;
 550}
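
     /*
      * As a worked example of the refcount arithmetic (an illustrative
      * sketch, not a statement about every caller): a pagecache page held
      * only by its isolating caller and the radix tree has a refcount of
      * 2, so __remove_mapping() can freeze the count at 2; remove_mapping()
      * then unfreezes it to 1, leaving just the caller's reference.
      */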
 551
 552/**
 553 * putback_lru_page - put previously isolated page onto appropriate LRU list
 554 * @page: page to be put back to appropriate lru list
 555 *
 556 * Add previously isolated @page to appropriate LRU list.
 557 * Page may still be unevictable for other reasons.
 558 *
 559 * lru_lock must not be held, interrupts must be enabled.
 560 */
 561void putback_lru_page(struct page *page)
 562{
 563        int lru;
 564        int active = !!TestClearPageActive(page);
 565        int was_unevictable = PageUnevictable(page);
 566
 567        VM_BUG_ON(PageLRU(page));
 568
 569redo:
 570        ClearPageUnevictable(page);
 571
 572        if (page_evictable(page, NULL)) {
 573                /*
 574                 * For evictable pages, we can use the cache.
  575                 * In the event of a race, the worst case is that we end
  576                 * up with an unevictable page on the [in]active list.
 577                 * We know how to handle that.
 578                 */
 579                lru = active + page_lru_base_type(page);
 580                lru_cache_add_lru(page, lru);
 581        } else {
 582                /*
 583                 * Put unevictable pages directly on zone's unevictable
 584                 * list.
 585                 */
 586                lru = LRU_UNEVICTABLE;
 587                add_page_to_unevictable_list(page);
 588                /*
 589                 * When racing with an mlock clearing (page is
 590                 * unlocked), make sure that if the other thread does
 591                 * not observe our setting of PG_lru and fails
 592                 * isolation, we see PG_mlocked cleared below and move
 593                 * the page back to the evictable list.
 594                 *
 595                 * The other side is TestClearPageMlocked().
 596                 */
 597                smp_mb();
 598        }
 599
 600        /*
  601         * A page's status can change while we move it among the LRU lists.
  602         * If an evictable page ends up on the unevictable list, it will never
  603         * be freed. To avoid that, check again after adding it to the list.
 604         */
 605        if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
 606                if (!isolate_lru_page(page)) {
 607                        put_page(page);
 608                        goto redo;
 609                }
  610                /* This means someone else dropped this page from the LRU,
  611                 * so it will be freed or put back to the LRU again. There is
  612                 * nothing to do here.
 613                 */
 614        }
 615
 616        if (was_unevictable && lru != LRU_UNEVICTABLE)
 617                count_vm_event(UNEVICTABLE_PGRESCUED);
 618        else if (!was_unevictable && lru == LRU_UNEVICTABLE)
 619                count_vm_event(UNEVICTABLE_PGCULLED);
 620
 621        put_page(page);         /* drop ref from isolate */
 622}
 623
 624enum page_references {
 625        PAGEREF_RECLAIM,
 626        PAGEREF_RECLAIM_CLEAN,
 627        PAGEREF_KEEP,
 628        PAGEREF_ACTIVATE,
 629};
 630
 631static enum page_references page_check_references(struct page *page,
 632                                                  struct scan_control *sc)
 633{
 634        int referenced_ptes, referenced_page;
 635        unsigned long vm_flags;
 636
 637        referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
 638        referenced_page = TestClearPageReferenced(page);
 639
 640        /* Lumpy reclaim - ignore references */
 641        if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
 642                return PAGEREF_RECLAIM;
 643
 644        /*
 645         * Mlock lost the isolation race with us.  Let try_to_unmap()
 646         * move the page to the unevictable list.
 647         */
 648        if (vm_flags & VM_LOCKED)
 649                return PAGEREF_RECLAIM;
 650
 651        if (referenced_ptes) {
 652                if (PageAnon(page))
 653                        return PAGEREF_ACTIVATE;
 654                /*
 655                 * All mapped pages start out with page table
 656                 * references from the instantiating fault, so we need
 657                 * to look twice if a mapped file page is used more
 658                 * than once.
 659                 *
 660                 * Mark it and spare it for another trip around the
 661                 * inactive list.  Another page table reference will
 662                 * lead to its activation.
 663                 *
 664                 * Note: the mark is set for activated pages as well
 665                 * so that recently deactivated but used pages are
 666                 * quickly recovered.
 667                 */
 668                SetPageReferenced(page);
 669
 670                if (referenced_page)
 671                        return PAGEREF_ACTIVATE;
 672
 673                return PAGEREF_KEEP;
 674        }
 675
 676        /* Reclaim if clean, defer dirty pages to writeback */
 677        if (referenced_page && !PageSwapBacked(page))
 678                return PAGEREF_RECLAIM_CLEAN;
 679
 680        return PAGEREF_RECLAIM;
 681}
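
     /*
      * As a worked example of the checks above: a mapped file page with
      * pte references and PG_referenced already set is activated; with
      * pte references alone it is marked and kept for one more lap of the
      * inactive list; with no pte references but PG_referenced set (and
      * not swap backed) it is reclaimed only while it is still clean.
      */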
 682
 683static noinline_for_stack void free_page_list(struct list_head *free_pages)
 684{
 685        struct pagevec freed_pvec;
 686        struct page *page, *tmp;
 687
 688        pagevec_init(&freed_pvec, 1);
 689
 690        list_for_each_entry_safe(page, tmp, free_pages, lru) {
 691                list_del(&page->lru);
 692                if (!pagevec_add(&freed_pvec, page)) {
 693                        __pagevec_free(&freed_pvec);
 694                        pagevec_reinit(&freed_pvec);
 695                }
 696        }
 697
 698        pagevec_free(&freed_pvec);
 699}
 700
 701/*
 702 * shrink_page_list() returns the number of reclaimed pages
 703 */
 704static unsigned long shrink_page_list(struct list_head *page_list,
 705                                      struct zone *zone,
 706                                      struct scan_control *sc)
 707{
 708        LIST_HEAD(ret_pages);
 709        LIST_HEAD(free_pages);
 710        int pgactivate = 0;
 711        unsigned long nr_dirty = 0;
 712        unsigned long nr_congested = 0;
 713        unsigned long nr_reclaimed = 0;
 714
 715        cond_resched();
 716
 717        while (!list_empty(page_list)) {
 718                enum page_references references;
 719                struct address_space *mapping;
 720                struct page *page;
 721                int may_enter_fs;
 722
 723                cond_resched();
 724
 725                page = lru_to_page(page_list);
 726                list_del(&page->lru);
 727
 728                if (!trylock_page(page))
 729                        goto keep;
 730
 731                VM_BUG_ON(PageActive(page));
 732                VM_BUG_ON(page_zone(page) != zone);
 733
 734                sc->nr_scanned++;
 735
 736                if (unlikely(!page_evictable(page, NULL)))
 737                        goto cull_mlocked;
 738
 739                if (!sc->may_unmap && page_mapped(page))
 740                        goto keep_locked;
 741
 742                /* Double the slab pressure for mapped and swapcache pages */
 743                if (page_mapped(page) || PageSwapCache(page))
 744                        sc->nr_scanned++;
 745
 746                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 747                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 748
 749                if (PageWriteback(page)) {
 750                        /*
 751                         * Synchronous reclaim is performed in two passes,
 752                         * first an asynchronous pass over the list to
 753                         * start parallel writeback, and a second synchronous
 754                         * pass to wait for the IO to complete.  Wait here
 755                         * for any page for which writeback has already
 756                         * started.
 757                         */
 758                        if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 759                            may_enter_fs)
 760                                wait_on_page_writeback(page);
 761                        else {
 762                                unlock_page(page);
 763                                goto keep_lumpy;
 764                        }
 765                }
 766
 767                references = page_check_references(page, sc);
 768                switch (references) {
 769                case PAGEREF_ACTIVATE:
 770                        goto activate_locked;
 771                case PAGEREF_KEEP:
 772                        goto keep_locked;
 773                case PAGEREF_RECLAIM:
 774                case PAGEREF_RECLAIM_CLEAN:
 775                        ; /* try to reclaim the page below */
 776                }
 777
 778                /*
 779                 * Anonymous process memory has backing store?
 780                 * Try to allocate it some swap space here.
 781                 */
 782                if (PageAnon(page) && !PageSwapCache(page)) {
 783                        if (!(sc->gfp_mask & __GFP_IO))
 784                                goto keep_locked;
 785                        if (!add_to_swap(page))
 786                                goto activate_locked;
 787                        may_enter_fs = 1;
 788                }
 789
 790                mapping = page_mapping(page);
 791
 792                /*
 793                 * The page is mapped into the page tables of one or more
 794                 * processes. Try to unmap it here.
 795                 */
 796                if (page_mapped(page) && mapping) {
 797                        switch (try_to_unmap(page, TTU_UNMAP)) {
 798                        case SWAP_FAIL:
 799                                goto activate_locked;
 800                        case SWAP_AGAIN:
 801                                goto keep_locked;
 802                        case SWAP_MLOCK:
 803                                goto cull_mlocked;
 804                        case SWAP_SUCCESS:
 805                                ; /* try to free the page below */
 806                        }
 807                }
 808
 809                if (PageDirty(page)) {
 810                        nr_dirty++;
 811
 812                        if (references == PAGEREF_RECLAIM_CLEAN)
 813                                goto keep_locked;
 814                        if (!may_enter_fs)
 815                                goto keep_locked;
 816                        if (!sc->may_writepage)
 817                                goto keep_locked;
 818
 819                        /* Page is dirty, try to write it out here */
 820                        switch (pageout(page, mapping, sc)) {
 821                        case PAGE_KEEP:
 822                                nr_congested++;
 823                                goto keep_locked;
 824                        case PAGE_ACTIVATE:
 825                                goto activate_locked;
 826                        case PAGE_SUCCESS:
 827                                if (PageWriteback(page))
 828                                        goto keep_lumpy;
 829                                if (PageDirty(page))
 830                                        goto keep;
 831
 832                                /*
 833                                 * A synchronous write - probably a ramdisk.  Go
 834                                 * ahead and try to reclaim the page.
 835                                 */
 836                                if (!trylock_page(page))
 837                                        goto keep;
 838                                if (PageDirty(page) || PageWriteback(page))
 839                                        goto keep_locked;
 840                                mapping = page_mapping(page);
 841                        case PAGE_CLEAN:
 842                                ; /* try to free the page below */
 843                        }
 844                }
 845
 846                /*
 847                 * If the page has buffers, try to free the buffer mappings
 848                 * associated with this page. If we succeed we try to free
 849                 * the page as well.
 850                 *
 851                 * We do this even if the page is PageDirty().
 852                 * try_to_release_page() does not perform I/O, but it is
 853                 * possible for a page to have PageDirty set, but it is actually
 854                 * clean (all its buffers are clean).  This happens if the
 855                 * buffers were written out directly, with submit_bh(). ext3
 856                 * will do this, as well as the blockdev mapping.
 857                 * try_to_release_page() will discover that cleanness and will
 858                 * drop the buffers and mark the page clean - it can be freed.
 859                 *
 860                 * Rarely, pages can have buffers and no ->mapping.  These are
 861                 * the pages which were not successfully invalidated in
 862                 * truncate_complete_page().  We try to drop those buffers here
 863                 * and if that worked, and the page is no longer mapped into
 864                 * process address space (page_count == 1) it can be freed.
 865                 * Otherwise, leave the page on the LRU so it is swappable.
 866                 */
 867                if (page_has_private(page)) {
 868                        if (!try_to_release_page(page, sc->gfp_mask))
 869                                goto activate_locked;
 870                        if (!mapping && page_count(page) == 1) {
 871                                unlock_page(page);
 872                                if (put_page_testzero(page))
 873                                        goto free_it;
 874                                else {
 875                                        /*
 876                                         * rare race with speculative reference.
 877                                         * the speculative reference will free
 878                                         * this page shortly, so we may
 879                                         * increment nr_reclaimed here (and
 880                                         * leave it off the LRU).
 881                                         */
 882                                        nr_reclaimed++;
 883                                        continue;
 884                                }
 885                        }
 886                }
 887
 888                if (!mapping || !__remove_mapping(mapping, page))
 889                        goto keep_locked;
 890
 891                /*
 892                 * At this point, we have no other references and there is
 893                 * no way to pick any more up (removed from LRU, removed
 894                 * from pagecache). Can use non-atomic bitops now (and
 895                 * we obviously don't have to worry about waking up a process
 896                 * waiting on the page lock, because there are no references.
 897                 */
 898                __clear_page_locked(page);
 899free_it:
 900                nr_reclaimed++;
 901
 902                /*
  903                 * Is there a need to call free_page_list() periodically? It
  904                 * would appear not, as the counts should be low.
 905                 */
 906                list_add(&page->lru, &free_pages);
 907                continue;
 908
 909cull_mlocked:
 910                if (PageSwapCache(page))
 911                        try_to_free_swap(page);
 912                unlock_page(page);
 913                putback_lru_page(page);
 914                reset_reclaim_mode(sc);
 915                continue;
 916
 917activate_locked:
 918                /* Not a candidate for swapping, so reclaim swap space. */
 919                if (PageSwapCache(page) && vm_swap_full())
 920                        try_to_free_swap(page);
 921                VM_BUG_ON(PageActive(page));
 922                SetPageActive(page);
 923                pgactivate++;
 924keep_locked:
 925                unlock_page(page);
 926keep:
 927                reset_reclaim_mode(sc);
 928keep_lumpy:
 929                list_add(&page->lru, &ret_pages);
 930                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
 931        }
 932
 933        /*
 934         * Tag a zone as congested if all the dirty pages encountered were
 935         * backed by a congested BDI. In this case, reclaimers should just
 936         * back off and wait for congestion to clear because further reclaim
  937         * will encounter the same problem.
 938         */
 939        if (nr_dirty == nr_congested && nr_dirty != 0)
 940                zone_set_flag(zone, ZONE_CONGESTED);
 941
 942        free_page_list(&free_pages);
 943
 944        list_splice(&ret_pages, page_list);
 945        count_vm_events(PGACTIVATE, pgactivate);
 946        return nr_reclaimed;
 947}
 948
 949/*
 950 * Attempt to remove the specified page from its LRU.  Only take this page
 951 * if it is of the appropriate PageActive status.  Pages which are being
 952 * freed elsewhere are also ignored.
 953 *
 954 * page:        page to consider
 955 * mode:        one of the LRU isolation modes defined above
 956 *
 957 * returns 0 on success, -ve errno on failure.
 958 */
 959int __isolate_lru_page(struct page *page, int mode, int file)
 960{
 961        int ret = -EINVAL;
 962
 963        /* Only take pages on the LRU. */
 964        if (!PageLRU(page))
 965                return ret;
 966
 967        /*
 968         * When checking the active state, we need to be sure we are
  969         * dealing with comparable boolean values.  Take the logical not
 970         * of each.
 971         */
 972        if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
 973                return ret;
 974
 975        if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
 976                return ret;
 977
 978        /*
 979         * When this function is being called for lumpy reclaim, we
 980         * initially look into all LRU pages, active, inactive and
 981         * unevictable; only give shrink_page_list evictable pages.
 982         */
 983        if (PageUnevictable(page))
 984                return ret;
 985
 986        ret = -EBUSY;
 987
 988        if (likely(get_page_unless_zero(page))) {
 989                /*
 990                 * Be careful not to clear PageLRU until after we're
 991                 * sure the page is not being freed elsewhere -- the
 992                 * page release code relies on it.
 993                 */
 994                ClearPageLRU(page);
 995                ret = 0;
 996        }
 997
 998        return ret;
 999}
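
     /*
      * For instance, a call with mode == ISOLATE_INACTIVE and file == 1
      * takes only inactive, file-backed, evictable pages that are still
      * on the LRU, which is what shrink_inactive_list() asks for when it
      * is not doing lumpy reclaim.
      */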
1000
1001/*
1002 * zone->lru_lock is heavily contended.  Some of the functions that
1003 * shrink the lists perform better by taking out a batch of pages
1004 * and working on them outside the LRU lock.
1005 *
1006 * For pagecache intensive workloads, this function is the hottest
1007 * spot in the kernel (apart from copy_*_user functions).
1008 *
1009 * Appropriate locks must be held before calling this function.
1010 *
1011 * @nr_to_scan: The number of pages to look through on the list.
1012 * @src:        The LRU list to pull pages off.
1013 * @dst:        The temp list to put pages on to.
1014 * @scanned:    The number of pages that were scanned.
1015 * @order:      The caller's attempted allocation order
1016 * @mode:       One of the LRU isolation modes
1017 * @file:       True [1] if isolating file [!anon] pages
1018 *
1019 * returns how many pages were moved onto *@dst.
1020 */
1021static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1022                struct list_head *src, struct list_head *dst,
1023                unsigned long *scanned, int order, int mode, int file)
1024{
1025        unsigned long nr_taken = 0;
1026        unsigned long nr_lumpy_taken = 0;
1027        unsigned long nr_lumpy_dirty = 0;
1028        unsigned long nr_lumpy_failed = 0;
1029        unsigned long scan;
1030
1031        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1032                struct page *page;
1033                unsigned long pfn;
1034                unsigned long end_pfn;
1035                unsigned long page_pfn;
1036                int zone_id;
1037
1038                page = lru_to_page(src);
1039                prefetchw_prev_lru_page(page, src, flags);
1040
1041                VM_BUG_ON(!PageLRU(page));
1042
1043                switch (__isolate_lru_page(page, mode, file)) {
1044                case 0:
1045                        list_move(&page->lru, dst);
1046                        mem_cgroup_del_lru(page);
1047                        nr_taken += hpage_nr_pages(page);
1048                        break;
1049
1050                case -EBUSY:
1051                        /* else it is being freed elsewhere */
1052                        list_move(&page->lru, src);
1053                        mem_cgroup_rotate_lru_list(page, page_lru(page));
1054                        continue;
1055
1056                default:
1057                        BUG();
1058                }
1059
1060                if (!order)
1061                        continue;
1062
1063                /*
1064                 * Attempt to take all pages in the order aligned region
1065                 * surrounding the tag page.  Only take those pages of
1066                 * the same active state as that tag page.  We may safely
1067                 * round the target page pfn down to the requested order
 1068                 * as the mem_map is guaranteed valid out to MAX_ORDER;
 1069                 * if a page is in a different zone we will detect it from
 1070                 * its zone id and abort this block scan.
1071                 */
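                     /*
                      * For example, with order == 3 a tag page at pfn 0x12345
                      * rounds down to pfn 0x12340 and the loop below scans the
                      * aligned block 0x12340..0x12347 (illustrative numbers).
                      */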
1072                zone_id = page_zone_id(page);
1073                page_pfn = page_to_pfn(page);
1074                pfn = page_pfn & ~((1 << order) - 1);
1075                end_pfn = pfn + (1 << order);
1076                for (; pfn < end_pfn; pfn++) {
1077                        struct page *cursor_page;
1078
1079                        /* The target page is in the block, ignore it. */
1080                        if (unlikely(pfn == page_pfn))
1081                                continue;
1082
1083                        /* Avoid holes within the zone. */
1084                        if (unlikely(!pfn_valid_within(pfn)))
1085                                break;
1086
1087                        cursor_page = pfn_to_page(pfn);
1088
1089                        /* Check that we have not crossed a zone boundary. */
1090                        if (unlikely(page_zone_id(cursor_page) != zone_id))
1091                                break;
1092
1093                        /*
 1094                         * If we don't have enough swap space, reclaiming
 1095                         * anon pages which don't already have a swap slot is
1096                         * pointless.
1097                         */
1098                        if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
1099                            !PageSwapCache(cursor_page))
1100                                break;
1101
1102                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1103                                list_move(&cursor_page->lru, dst);
1104                                mem_cgroup_del_lru(cursor_page);
1105                                nr_taken += hpage_nr_pages(page);
1106                                nr_lumpy_taken++;
1107                                if (PageDirty(cursor_page))
1108                                        nr_lumpy_dirty++;
1109                                scan++;
1110                        } else {
1111                                /* the page is freed already. */
1112                                if (!page_count(cursor_page))
1113                                        continue;
1114                                break;
1115                        }
1116                }
1117
1118                /* If we break out of the loop above, lumpy reclaim failed */
1119                if (pfn < end_pfn)
1120                        nr_lumpy_failed++;
1121        }
1122
1123        *scanned = scan;
1124
1125        trace_mm_vmscan_lru_isolate(order,
1126                        nr_to_scan, scan,
1127                        nr_taken,
1128                        nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1129                        mode);
1130        return nr_taken;
1131}
1132
1133static unsigned long isolate_pages_global(unsigned long nr,
1134                                        struct list_head *dst,
1135                                        unsigned long *scanned, int order,
1136                                        int mode, struct zone *z,
1137                                        int active, int file)
1138{
1139        int lru = LRU_BASE;
1140        if (active)
1141                lru += LRU_ACTIVE;
1142        if (file)
1143                lru += LRU_FILE;
1144        return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
1145                                                                mode, file);
1146}
1147
1148/*
 1149 * clear_active_flags() is a helper for update_isolated_counts(), clearing
1150 * any active bits from the pages in the list.
1151 */
1152static unsigned long clear_active_flags(struct list_head *page_list,
1153                                        unsigned int *count)
1154{
1155        int nr_active = 0;
1156        int lru;
1157        struct page *page;
1158
1159        list_for_each_entry(page, page_list, lru) {
1160                int numpages = hpage_nr_pages(page);
1161                lru = page_lru_base_type(page);
1162                if (PageActive(page)) {
1163                        lru += LRU_ACTIVE;
1164                        ClearPageActive(page);
1165                        nr_active += numpages;
1166                }
1167                if (count)
1168                        count[lru] += numpages;
1169        }
1170
1171        return nr_active;
1172}
1173
1174/**
1175 * isolate_lru_page - tries to isolate a page from its LRU list
1176 * @page: page to isolate from its LRU list
1177 *
1178 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1179 * vmstat statistic corresponding to whatever LRU list the page was on.
1180 *
1181 * Returns 0 if the page was removed from an LRU list.
1182 * Returns -EBUSY if the page was not on an LRU list.
1183 *
1184 * The returned page will have PageLRU() cleared.  If it was found on
1185 * the active list, it will have PageActive set.  If it was found on
1186 * the unevictable list, it will have the PageUnevictable bit set. That flag
1187 * may need to be cleared by the caller before letting the page go.
1188 *
1189 * The vmstat statistic corresponding to the list on which the page was
1190 * found will be decremented.
1191 *
1192 * Restrictions:
1193 * (1) Must be called with an elevated refcount on the page. This is a
 1194 *     fundamental difference from isolate_lru_pages (which is called
1195 *     without a stable reference).
1196 * (2) the lru_lock must not be held.
1197 * (3) interrupts must be enabled.
1198 */
1199int isolate_lru_page(struct page *page)
1200{
1201        int ret = -EBUSY;
1202
1203        if (PageLRU(page)) {
1204                struct zone *zone = page_zone(page);
1205
1206                spin_lock_irq(&zone->lru_lock);
1207                if (PageLRU(page) && get_page_unless_zero(page)) {
1208                        int lru = page_lru(page);
1209                        ret = 0;
1210                        ClearPageLRU(page);
1211
1212                        del_page_from_lru_list(zone, page, lru);
1213                }
1214                spin_unlock_irq(&zone->lru_lock);
1215        }
1216        return ret;
1217}
1218
1219/*
1220 * Are there way too many processes in the direct reclaim path already?
1221 */
1222static int too_many_isolated(struct zone *zone, int file,
1223                struct scan_control *sc)
1224{
1225        unsigned long inactive, isolated;
1226
1227        if (current_is_kswapd())
1228                return 0;
1229
1230        if (!scanning_global_lru(sc))
1231                return 0;
1232
1233        if (file) {
1234                inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1235                isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1236        } else {
1237                inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1238                isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1239        }
1240
1241        return isolated > inactive;
1242}
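
     /*
      * For example, with 10000 pages on the zone's inactive file list but
      * 12000 file pages already isolated by other reclaimers, a new direct
      * reclaimer is told to back off; shrink_inactive_list() then sleeps
      * in congestion_wait() until the imbalance clears.
      */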
1243
1244/*
 1245 * TODO: Try merging with the migration version of putback_lru_pages
1246 */
1247static noinline_for_stack void
1248putback_lru_pages(struct zone *zone, struct scan_control *sc,
1249                                unsigned long nr_anon, unsigned long nr_file,
1250                                struct list_head *page_list)
1251{
1252        struct page *page;
1253        struct pagevec pvec;
1254        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1255
1256        pagevec_init(&pvec, 1);
1257
1258        /*
1259         * Put back any unfreeable pages.
1260         */
1261        spin_lock(&zone->lru_lock);
1262        while (!list_empty(page_list)) {
1263                int lru;
1264                page = lru_to_page(page_list);
1265                VM_BUG_ON(PageLRU(page));
1266                list_del(&page->lru);
1267                if (unlikely(!page_evictable(page, NULL))) {
1268                        spin_unlock_irq(&zone->lru_lock);
1269                        putback_lru_page(page);
1270                        spin_lock_irq(&zone->lru_lock);
1271                        continue;
1272                }
1273                SetPageLRU(page);
1274                lru = page_lru(page);
1275                add_page_to_lru_list(zone, page, lru);
1276                if (is_active_lru(lru)) {
1277                        int file = is_file_lru(lru);
1278                        int numpages = hpage_nr_pages(page);
1279                        reclaim_stat->recent_rotated[file] += numpages;
1280                }
1281                if (!pagevec_add(&pvec, page)) {
1282                        spin_unlock_irq(&zone->lru_lock);
1283                        __pagevec_release(&pvec);
1284                        spin_lock_irq(&zone->lru_lock);
1285                }
1286        }
1287        __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
1288        __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1289
1290        spin_unlock_irq(&zone->lru_lock);
1291        pagevec_release(&pvec);
1292}
1293
1294static noinline_for_stack void update_isolated_counts(struct zone *zone,
1295                                        struct scan_control *sc,
1296                                        unsigned long *nr_anon,
1297                                        unsigned long *nr_file,
1298                                        struct list_head *isolated_list)
1299{
1300        unsigned long nr_active;
1301        unsigned int count[NR_LRU_LISTS] = { 0, };
1302        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1303
1304        nr_active = clear_active_flags(isolated_list, count);
1305        __count_vm_events(PGDEACTIVATE, nr_active);
1306
1307        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1308                              -count[LRU_ACTIVE_FILE]);
1309        __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1310                              -count[LRU_INACTIVE_FILE]);
1311        __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1312                              -count[LRU_ACTIVE_ANON]);
1313        __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1314                              -count[LRU_INACTIVE_ANON]);
1315
1316        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1317        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1318        __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1319        __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1320
1321        reclaim_stat->recent_scanned[0] += *nr_anon;
1322        reclaim_stat->recent_scanned[1] += *nr_file;
1323}
1324
1325/*
1326 * Returns true if the caller should wait to clean dirty/writeback pages.
1327 *
1328 * If we are direct reclaiming for contiguous pages and we do not reclaim
1329 * everything in the list, try again and wait for writeback IO to complete.
 1330 * This will stall high-order allocations noticeably. Only do that when we
 1331 * really need to free the pages under high memory pressure.
1332 */
1333static inline bool should_reclaim_stall(unsigned long nr_taken,
1334                                        unsigned long nr_freed,
1335                                        int priority,
1336                                        struct scan_control *sc)
1337{
1338        int lumpy_stall_priority;
1339
1340        /* kswapd should not stall on sync IO */
1341        if (current_is_kswapd())
1342                return false;
1343
1344        /* Only stall on lumpy reclaim */
1345        if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
1346                return false;
1347
 1348        /* If we have reclaimed everything on the isolated list, no stall */
1349        if (nr_freed == nr_taken)
1350                return false;
1351
1352        /*
1353         * For high-order allocations, there are two stall thresholds.
 1354         * High-cost allocations stall immediately whereas lower
1355         * order allocations such as stacks require the scanning
1356         * priority to be much higher before stalling.
1357         */
1358        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1359                lumpy_stall_priority = DEF_PRIORITY;
1360        else
1361                lumpy_stall_priority = DEF_PRIORITY / 3;
1362
1363        return priority <= lumpy_stall_priority;
1364}
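
     /*
      * Concretely (assuming DEF_PRIORITY == 12 and
      * PAGE_ALLOC_COSTLY_ORDER == 3): an order-5 lumpy reclaim that did not
      * free everything stalls on the very first pass, while an order-2 one
      * only stalls once priority has dropped to 4 or below.
      */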
1365
1366/*
1367 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
1368 * of reclaimed pages
1369 */
1370static noinline_for_stack unsigned long
1371shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
1372                        struct scan_control *sc, int priority, int file)
1373{
1374        LIST_HEAD(page_list);
1375        unsigned long nr_scanned;
1376        unsigned long nr_reclaimed = 0;
1377        unsigned long nr_taken;
1378        unsigned long nr_anon;
1379        unsigned long nr_file;
1380
1381        while (unlikely(too_many_isolated(zone, file, sc))) {
1382                congestion_wait(BLK_RW_ASYNC, HZ/10);
1383
1384                /* We are about to die and free our memory. Return now. */
1385                if (fatal_signal_pending(current))
1386                        return SWAP_CLUSTER_MAX;
1387        }
1388
1389        set_reclaim_mode(priority, sc, false);
1390        lru_add_drain();
1391        spin_lock_irq(&zone->lru_lock);
1392
1393        if (scanning_global_lru(sc)) {
1394                nr_taken = isolate_pages_global(nr_to_scan,
1395                        &page_list, &nr_scanned, sc->order,
1396                        sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1397                                        ISOLATE_BOTH : ISOLATE_INACTIVE,
1398                        zone, 0, file);
1399                zone->pages_scanned += nr_scanned;
1400                if (current_is_kswapd())
1401                        __count_zone_vm_events(PGSCAN_KSWAPD, zone,
1402                                               nr_scanned);
1403                else
1404                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
1405                                               nr_scanned);
1406        } else {
1407                nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
1408                        &page_list, &nr_scanned, sc->order,
1409                        sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
1410                                        ISOLATE_BOTH : ISOLATE_INACTIVE,
1411                        zone, sc->mem_cgroup,
1412                        0, file);
1413                /*
1414                 * mem_cgroup_isolate_pages() keeps track of
1415                 * scanned pages on its own.
1416                 */
1417        }
1418
1419        if (nr_taken == 0) {
1420                spin_unlock_irq(&zone->lru_lock);
1421                return 0;
1422        }
1423
1424        update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
1425
1426        spin_unlock_irq(&zone->lru_lock);
1427
1428        nr_reclaimed = shrink_page_list(&page_list, zone, sc);
1429
1430        /* Check if we should synchronously wait for writeback */
1431        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1432                set_reclaim_mode(priority, sc, true);
1433                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
1434        }
1435
1436        local_irq_disable();
1437        if (current_is_kswapd())
1438                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
1439        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
1440
1441        putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
1442
1443        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1444                zone_idx(zone),
1445                nr_scanned, nr_reclaimed,
1446                priority,
1447                trace_shrink_flags(file, sc->reclaim_mode));
1448        return nr_reclaimed;
1449}
1450
1451/*
1452 * This moves pages from the active list to the inactive list.
1453 *
1454 * We move them the other way if the page is referenced by one or more
1455 * processes, from rmap.
1456 *
1457 * If the pages are mostly unmapped, the processing is fast and it is
1458 * appropriate to hold zone->lru_lock across the whole operation.  But if
1459 * the pages are mapped, the processing is slow (page_referenced()) so we
1460 * should drop zone->lru_lock around each page.  It's impossible to balance
1461 * this, so instead we remove the pages from the LRU while processing them.
1462 * It is safe to rely on PG_active against the non-LRU pages in here because
1463 * nobody will play with that bit on a non-LRU page.
1464 *
1465 * The downside is that we have to touch page->_count against each page.
1466 * But we had to alter page->flags anyway.
1467 */
1468
1469static void move_active_pages_to_lru(struct zone *zone,
1470                                     struct list_head *list,
1471                                     enum lru_list lru)
1472{
1473        unsigned long pgmoved = 0;
1474        struct pagevec pvec;
1475        struct page *page;
1476
1477        pagevec_init(&pvec, 1);
1478
1479        while (!list_empty(list)) {
1480                page = lru_to_page(list);
1481
1482                VM_BUG_ON(PageLRU(page));
1483                SetPageLRU(page);
1484
1485                list_move(&page->lru, &zone->lru[lru].list);
1486                mem_cgroup_add_lru_list(page, lru);
1487                pgmoved += hpage_nr_pages(page);
1488
1489                if (!pagevec_add(&pvec, page) || list_empty(list)) {
1490                        spin_unlock_irq(&zone->lru_lock);
1491                        if (buffer_heads_over_limit)
1492                                pagevec_strip(&pvec);
1493                        __pagevec_release(&pvec);
1494                        spin_lock_irq(&zone->lru_lock);
1495                }
1496        }
1497        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1498        if (!is_active_lru(lru))
1499                __count_vm_events(PGDEACTIVATE, pgmoved);
1500}
1501
1502static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1503                        struct scan_control *sc, int priority, int file)
1504{
1505        unsigned long nr_taken;
1506        unsigned long pgscanned;
1507        unsigned long vm_flags;
1508        LIST_HEAD(l_hold);      /* The pages which were snipped off */
1509        LIST_HEAD(l_active);
1510        LIST_HEAD(l_inactive);
1511        struct page *page;
1512        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1513        unsigned long nr_rotated = 0;
1514
1515        lru_add_drain();
1516        spin_lock_irq(&zone->lru_lock);
1517        if (scanning_global_lru(sc)) {
1518                nr_taken = isolate_pages_global(nr_pages, &l_hold,
1519                                                &pgscanned, sc->order,
1520                                                ISOLATE_ACTIVE, zone,
1521                                                1, file);
1522                zone->pages_scanned += pgscanned;
1523        } else {
1524                nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
1525                                                &pgscanned, sc->order,
1526                                                ISOLATE_ACTIVE, zone,
1527                                                sc->mem_cgroup, 1, file);
1528                /*
1529                 * mem_cgroup_isolate_pages() keeps track of
1530                 * scanned pages on its own.
1531                 */
1532        }
1533
1534        reclaim_stat->recent_scanned[file] += nr_taken;
1535
1536        __count_zone_vm_events(PGREFILL, zone, pgscanned);
1537        if (file)
1538                __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1539        else
1540                __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1541        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1542        spin_unlock_irq(&zone->lru_lock);
1543
1544        while (!list_empty(&l_hold)) {
1545                cond_resched();
1546                page = lru_to_page(&l_hold);
1547                list_del(&page->lru);
1548
1549                if (unlikely(!page_evictable(page, NULL))) {
1550                        putback_lru_page(page);
1551                        continue;
1552                }
1553
1554                if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
1555                        nr_rotated += hpage_nr_pages(page);
1556                        /*
1557                         * Identify referenced, file-backed active pages and
1558                         * give them one more trip around the active list, so
1559                         * that executable code gets a better chance to stay in
1560                         * memory under moderate memory pressure.  Anon pages
1561                         * are not likely to be evicted by use-once streaming
1562                         * IO, plus JVM can create lots of anon VM_EXEC pages,
1563                         * so we ignore them here.
1564                         */
1565                        if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1566                                list_add(&page->lru, &l_active);
1567                                continue;
1568                        }
1569                }
1570
1571                ClearPageActive(page);  /* we are de-activating */
1572                list_add(&page->lru, &l_inactive);
1573        }
1574
1575        /*
1576         * Move pages back to the lru list.
1577         */
1578        spin_lock_irq(&zone->lru_lock);
1579        /*
1580         * Count referenced pages from currently used mappings as rotated,
1581         * even though only some of them are actually re-activated.  This
1582         * helps balance scan pressure between file and anonymous pages in
1583         * get_scan_ratio.
1584         */
1585        reclaim_stat->recent_rotated[file] += nr_rotated;
1586
1587        move_active_pages_to_lru(zone, &l_active,
1588                                                LRU_ACTIVE + file * LRU_FILE);
1589        move_active_pages_to_lru(zone, &l_inactive,
1590                                                LRU_BASE   + file * LRU_FILE);
1591        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1592        spin_unlock_irq(&zone->lru_lock);
1593}
1594
1595#ifdef CONFIG_SWAP
1596static int inactive_anon_is_low_global(struct zone *zone)
1597{
1598        unsigned long active, inactive;
1599
1600        active = zone_page_state(zone, NR_ACTIVE_ANON);
1601        inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1602
1603        if (inactive * zone->inactive_ratio < active)
1604                return 1;
1605
1606        return 0;
1607}
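/*
 * Worked example for inactive_anon_is_low_global() (illustrative numbers
 * only): with an inactive_ratio of 3 (a value derived elsewhere from the
 * zone size), a zone holding 300000 active and 90000 inactive anon pages is
 * "low", because 90000 * 3 == 270000 < 300000, so some active anon pages
 * should be deactivated.
 */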
1608
1609/**
1610 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1611 * @zone: zone to check
1612 * @sc:   scan control of this context
1613 *
1614 * Returns true if the zone does not have enough inactive anon pages,
1615 * meaning some active anon pages need to be deactivated.
1616 */
1617static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
1618{
1619        int low;
1620
1621        /*
1622         * If we don't have swap space, anonymous page deactivation
1623         * is pointless.
1624         */
1625        if (!total_swap_pages)
1626                return 0;
1627
1628        if (scanning_global_lru(sc))
1629                low = inactive_anon_is_low_global(zone);
1630        else
1631                low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
1632        return low;
1633}
1634#else
1635static inline int inactive_anon_is_low(struct zone *zone,
1636                                        struct scan_control *sc)
1637{
1638        return 0;
1639}
1640#endif
1641
1642static int inactive_file_is_low_global(struct zone *zone)
1643{
1644        unsigned long active, inactive;
1645
1646        active = zone_page_state(zone, NR_ACTIVE_FILE);
1647        inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1648
1649        return (active > inactive);
1650}
1651
1652/**
1653 * inactive_file_is_low - check if file pages need to be deactivated
1654 * @zone: zone to check
1655 * @sc:   scan control of this context
1656 *
1657 * When the system is doing streaming IO, memory pressure here
1658 * ensures that active file pages get deactivated, until more
1659 * than half of the file pages are on the inactive list.
1660 *
1661 * Once we get to that situation, protect the system's working
1662 * set from being evicted by disabling active file page aging.
1663 *
1664 * This uses a different ratio than the anonymous pages, because
1665 * the page cache uses a use-once replacement algorithm.
1666 */
1667static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
1668{
1669        int low;
1670
1671        if (scanning_global_lru(sc))
1672                low = inactive_file_is_low_global(zone);
1673        else
1674                low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
1675        return low;
1676}
1677
1678static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
1679                                int file)
1680{
1681        if (file)
1682                return inactive_file_is_low(zone, sc);
1683        else
1684                return inactive_anon_is_low(zone, sc);
1685}
1686
1687static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1688        struct zone *zone, struct scan_control *sc, int priority)
1689{
1690        int file = is_file_lru(lru);
1691
1692        if (is_active_lru(lru)) {
1693                if (inactive_list_is_low(zone, sc, file))
1694                    shrink_active_list(nr_to_scan, zone, sc, priority, file);
1695                return 0;
1696        }
1697
1698        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
1699}
1700
1701/*
1702 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1703 * until we have collected SWAP_CLUSTER_MAX pages to scan.
1704 */
1705static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1706                                       unsigned long *nr_saved_scan)
1707{
1708        unsigned long nr;
1709
1710        *nr_saved_scan += nr_to_scan;
1711        nr = *nr_saved_scan;
1712
1713        if (nr >= SWAP_CLUSTER_MAX)
1714                *nr_saved_scan = 0;
1715        else
1716                nr = 0;
1717
1718        return nr;
1719}
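/*
 * Example of the batching above (assuming SWAP_CLUSTER_MAX == 32): three
 * calls with nr_to_scan of 10, 10 and 15 return 0, 0 and 35; *nr_saved_scan
 * holds 10, then 20, and is reset to 0 once the accumulated total of 35 is
 * finally handed back for scanning.
 */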
1720
1721/*
1722 * Determine how aggressively the anon and file LRU lists should be
1723 * scanned.  The relative value of each set of LRU lists is determined
1724 * by looking at the fraction of the scanned pages that we rotated back
1725 * onto the active list instead of evicting.
1726 *
1727 * nr[0] = anon pages to scan; nr[1] = file pages to scan
1728 */
1729static void get_scan_count(struct zone *zone, struct scan_control *sc,
1730                                        unsigned long *nr, int priority)
1731{
1732        unsigned long anon, file, free;
1733        unsigned long anon_prio, file_prio;
1734        unsigned long ap, fp;
1735        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1736        u64 fraction[2], denominator;
1737        enum lru_list l;
1738        int noswap = 0;
1739
1740        /* If we have no swap space, do not bother scanning anon pages. */
1741        if (!sc->may_swap || (nr_swap_pages <= 0)) {
1742                noswap = 1;
1743                fraction[0] = 0;
1744                fraction[1] = 1;
1745                denominator = 1;
1746                goto out;
1747        }
1748
1749        anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1750                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1751        file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1752                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1753
1754        if (scanning_global_lru(sc)) {
1755                free  = zone_page_state(zone, NR_FREE_PAGES);
1756                /* If we have very few page cache pages,
1757                   force-scan anon pages. */
1758                if (unlikely(file + free <= high_wmark_pages(zone))) {
1759                        fraction[0] = 1;
1760                        fraction[1] = 0;
1761                        denominator = 1;
1762                        goto out;
1763                }
1764        }
1765
1766        /*
1767         * With swappiness at 100, anonymous and file have the same priority.
1768         * This scanning priority is essentially the inverse of IO cost.
1769         */
1770        anon_prio = sc->swappiness;
1771        file_prio = 200 - sc->swappiness;
1772
1773        /*
1774         * OK, so we have swap space and a fair amount of page cache
1775         * pages.  We use the recently rotated / recently scanned
1776         * ratios to determine how valuable each cache is.
1777         *
1778         * Because workloads change over time (and to avoid overflow)
1779         * we keep these statistics as a floating average, which ends
1780         * up weighing recent references more than old ones.
1781         *
1782         * anon in [0], file in [1]
1783         */
1784        spin_lock_irq(&zone->lru_lock);
1785        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1786                reclaim_stat->recent_scanned[0] /= 2;
1787                reclaim_stat->recent_rotated[0] /= 2;
1788        }
1789
1790        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
1791                reclaim_stat->recent_scanned[1] /= 2;
1792                reclaim_stat->recent_rotated[1] /= 2;
1793        }
1794
1795        /*
1796         * The amount of pressure on anon vs file pages is inversely
1797         * proportional to the fraction of recently scanned pages on
1798         * each list that were recently referenced and in active use.
1799         */
1800        ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
1801        ap /= reclaim_stat->recent_rotated[0] + 1;
1802
1803        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
1804        fp /= reclaim_stat->recent_rotated[1] + 1;
1805        spin_unlock_irq(&zone->lru_lock);
1806
1807        fraction[0] = ap;
1808        fraction[1] = fp;
1809        denominator = ap + fp + 1;
1810out:
1811        for_each_evictable_lru(l) {
1812                int file = is_file_lru(l);
1813                unsigned long scan;
1814
1815                scan = zone_nr_lru_pages(zone, sc, l);
1816                if (priority || noswap) {
1817                        scan >>= priority;
1818                        scan = div64_u64(scan * fraction[file], denominator);
1819                }
1820                nr[l] = nr_scan_try_batch(scan,
1821                                          &reclaim_stat->nr_saved_scan[l]);
1822        }
1823}
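/*
 * Worked example for get_scan_count() (illustrative numbers only): with
 * swappiness == 60, anon_prio == 60 and file_prio == 140.  If the
 * recent_scanned/recent_rotated pairs are 1000/500 for anon and 1000/100
 * for file, then
 *	ap = 61 * 1001 / 501  ~= 121
 *	fp = 141 * 1001 / 101 ~= 1397
 * so, before the per-list size and priority shift are applied in the out:
 * loop, file pages receive roughly eleven times the scan pressure of anon
 * pages.
 */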
1824
1825/*
1826 * Reclaim/compaction depends on a number of pages being freed. To avoid
1827 * disruption to the system, a small number of order-0 pages continue to be
1828 * rotated and reclaimed in the normal fashion. However, by the time we get
1829 * back to the allocator and call try_to_compact_zone(), we ensure that
1830 * there are enough free pages for it to be likely to succeed.
1831 */
1832static inline bool should_continue_reclaim(struct zone *zone,
1833                                        unsigned long nr_reclaimed,
1834                                        unsigned long nr_scanned,
1835                                        struct scan_control *sc)
1836{
1837        unsigned long pages_for_compaction;
1838        unsigned long inactive_lru_pages;
1839
1840        /* If not in reclaim/compaction mode, stop */
1841        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1842                return false;
1843
1844        /* Consider stopping depending on scan and reclaim activity */
1845        if (sc->gfp_mask & __GFP_REPEAT) {
1846                /*
1847                 * For __GFP_REPEAT allocations, stop reclaiming if the
1848                 * full LRU list has been scanned and we are still failing
1849                 * to reclaim pages. This full LRU scan is potentially
1850                 * expensive but a __GFP_REPEAT caller really wants to succeed
1851                 */
1852                if (!nr_reclaimed && !nr_scanned)
1853                        return false;
1854        } else {
1855                /*
1856                 * For non-__GFP_REPEAT allocations which can presumably
1857                 * fail without consequence, stop if we failed to reclaim
1858                 * any pages from the last SWAP_CLUSTER_MAX number of
1859                 * pages that were scanned. This will return to the
1860                 * caller faster at the risk that reclaim/compaction and
1861                 * the resulting allocation attempt fail.
1862                 */
1863                if (!nr_reclaimed)
1864                        return false;
1865        }
1866
1867        /*
1868         * If we have not reclaimed enough pages for compaction and the
1869         * inactive lists are large enough, continue reclaiming
1870         */
1871        pages_for_compaction = (2UL << sc->order);
1872        inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
1873                                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1874        if (sc->nr_reclaimed < pages_for_compaction &&
1875                        inactive_lru_pages > pages_for_compaction)
1876                return true;
1877
1878        /* If compaction would go ahead or the allocation would succeed, stop */
1879        switch (compaction_suitable(zone, sc->order)) {
1880        case COMPACT_PARTIAL:
1881        case COMPACT_CONTINUE:
1882                return false;
1883        default:
1884                return true;
1885        }
1886}
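/*
 * Illustration for should_continue_reclaim(): for an order-9 request (a 2MB
 * huge page with 4K base pages), pages_for_compaction is 2UL << 9 == 1024
 * pages, so reclaim keeps going while fewer than 1024 pages have been
 * reclaimed and the inactive lists still hold more than 1024 pages;
 * otherwise the decision falls to compaction_suitable().
 */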
1887
1888/*
1889 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1890 */
1891static void shrink_zone(int priority, struct zone *zone,
1892                                struct scan_control *sc)
1893{
1894        unsigned long nr[NR_LRU_LISTS];
1895        unsigned long nr_to_scan;
1896        enum lru_list l;
1897        unsigned long nr_reclaimed, nr_scanned;
1898        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1899
1900restart:
1901        nr_reclaimed = 0;
1902        nr_scanned = sc->nr_scanned;
1903        get_scan_count(zone, sc, nr, priority);
1904
1905        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1906                                        nr[LRU_INACTIVE_FILE]) {
1907                for_each_evictable_lru(l) {
1908                        if (nr[l]) {
1909                                nr_to_scan = min_t(unsigned long,
1910                                                   nr[l], SWAP_CLUSTER_MAX);
1911                                nr[l] -= nr_to_scan;
1912
1913                                nr_reclaimed += shrink_list(l, nr_to_scan,
1914                                                            zone, sc, priority);
1915                        }
1916                }
1917                /*
1918                 * On large memory systems, scan >> priority can become
1919                 * really large. This is fine for the starting priority;
1920                 * we want to put equal scanning pressure on each zone.
1921                 * However, if the VM has a harder time freeing pages,
1922                 * with multiple processes reclaiming pages, the total
1923                 * freeing target can get unreasonably large.
1924                 */
1925                if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
1926                        break;
1927        }
1928        sc->nr_reclaimed += nr_reclaimed;
1929
1930        /*
1931         * Even if we did not try to evict anon pages at all, we want to
1932         * rebalance the anon lru active/inactive ratio.
1933         */
1934        if (inactive_anon_is_low(zone, sc))
1935                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
1936
1937        /* reclaim/compaction might need reclaim to continue */
1938        if (should_continue_reclaim(zone, nr_reclaimed,
1939                                        sc->nr_scanned - nr_scanned, sc))
1940                goto restart;
1941
1942        throttle_vm_writeout(sc->gfp_mask);
1943}
1944
1945/*
1946 * This is the direct reclaim path, for page-allocating processes.  We only
1947 * try to reclaim pages from zones which will satisfy the caller's allocation
1948 * request.
1949 *
1950 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1951 * Because:
1952 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1953 *    allocation or
1954 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1955 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1956 *    zone defense algorithm.
1957 *
1958 * If a zone is deemed to be full of pinned pages then just give it a light
1959 * scan then give up on it.
1960 */
1961static void shrink_zones(int priority, struct zonelist *zonelist,
1962                                        struct scan_control *sc)
1963{
1964        struct zoneref *z;
1965        struct zone *zone;
1966
1967        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1968                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
1969                if (!populated_zone(zone))
1970                        continue;
1971                /*
1972                 * Take care that memory controller reclaim has only a small
1973                 * influence on the global LRU.
1974                 */
1975                if (scanning_global_lru(sc)) {
1976                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1977                                continue;
1978                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
1979                                continue;       /* Let kswapd poll it */
1980                }
1981
1982                shrink_zone(priority, zone, sc);
1983        }
1984}
1985
1986static bool zone_reclaimable(struct zone *zone)
1987{
1988        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1989}
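/*
 * In other words, a zone is treated as reclaimable until roughly six times
 * its reclaimable pages have been scanned without progress; e.g. a zone with
 * 100000 reclaimable pages stops being considered reclaimable once
 * zone->pages_scanned reaches 600000.
 */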
1990
1991/*
1992 * While hibernation is in progress, kswapd is frozen, so it cannot mark
1993 * zones all_unreclaimable and cannot handle OOM during hibernation.
1994 * So check whether zones are unreclaimable in direct reclaim as well as in kswapd.
1995 */
1996static bool all_unreclaimable(struct zonelist *zonelist,
1997                struct scan_control *sc)
1998{
1999        struct zoneref *z;
2000        struct zone *zone;
2001        bool all_unreclaimable = true;
2002
2003        for_each_zone_zonelist_nodemask(zone, z, zonelist,
2004                        gfp_zone(sc->gfp_mask), sc->nodemask) {
2005                if (!populated_zone(zone))
2006                        continue;
2007                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2008                        continue;
2009                if (zone_reclaimable(zone)) {
2010                        all_unreclaimable = false;
2011                        break;
2012                }
2013        }
2014
2015        return all_unreclaimable;
2016}
2017
2018/*
2019 * This is the main entry point to direct page reclaim.
2020 *
2021 * If a full scan of the inactive list fails to free enough memory then we
2022 * are "out of memory" and something needs to be killed.
2023 *
2024 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2025 * high - the zone may be full of dirty or under-writeback pages, which this
2026 * caller can't do much about.  We kick the writeback threads and take explicit
2027 * naps in the hope that some of these pages can be written.  But if the
2028 * allocating task holds filesystem locks which prevent writeout this might not
2029 * work, and the allocation attempt will fail.
2030 *
2031 * returns:     0, if no pages reclaimed
2032 *              else, the number of pages reclaimed
2033 */
2034static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2035                                        struct scan_control *sc)
2036{
2037        int priority;
2038        unsigned long total_scanned = 0;
2039        struct reclaim_state *reclaim_state = current->reclaim_state;
2040        struct zoneref *z;
2041        struct zone *zone;
2042        unsigned long writeback_threshold;
2043
2044        get_mems_allowed();
2045        delayacct_freepages_start();
2046
2047        if (scanning_global_lru(sc))
2048                count_vm_event(ALLOCSTALL);
2049
2050        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2051                sc->nr_scanned = 0;
2052                if (!priority)
2053                        disable_swap_token();
2054                shrink_zones(priority, zonelist, sc);
2055                /*
2056                 * Don't shrink slabs when reclaiming memory from
2057                 * over-limit cgroups
2058                 */
2059                if (scanning_global_lru(sc)) {
2060                        unsigned long lru_pages = 0;
2061                        for_each_zone_zonelist(zone, z, zonelist,
2062                                        gfp_zone(sc->gfp_mask)) {
2063                                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2064                                        continue;
2065
2066                                lru_pages += zone_reclaimable_pages(zone);
2067                        }
2068
2069                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
2070                        if (reclaim_state) {
2071                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2072                                reclaim_state->reclaimed_slab = 0;
2073                        }
2074                }
2075                total_scanned += sc->nr_scanned;
2076                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2077                        goto out;
2078
2079                /*
2080                 * Try to write back as many pages as we just scanned.  This
2081                 * tends to cause slow streaming writers to write data to the
2082                 * disk smoothly, at the dirtying rate, which is nice.   But
2083                 * that's undesirable in laptop mode, where we *want* lumpy
2084                 * writeout.  So in laptop mode, write out the whole world.
2085                 */
2086                writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2087                if (total_scanned > writeback_threshold) {
2088                        wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
2089                        sc->may_writepage = 1;
2090                }
2091
2092                /* Take a nap, wait for some writeback to complete */
2093                if (!sc->hibernation_mode && sc->nr_scanned &&
2094                    priority < DEF_PRIORITY - 2) {
2095                        struct zone *preferred_zone;
2096
2097                        first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2098                                                &cpuset_current_mems_allowed,
2099                                                &preferred_zone);
2100                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2101                }
2102        }
2103
2104out:
2105        delayacct_freepages_end();
2106        put_mems_allowed();
2107
2108        if (sc->nr_reclaimed)
2109                return sc->nr_reclaimed;
2110
2111        /* top priority shrink_zones still had more to do? don't OOM, then */
2112        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
2113                return 1;
2114
2115        return 0;
2116}
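/*
 * Note on the writeback threshold used in do_try_to_free_pages() above: it
 * is nr_to_reclaim plus half of it again, so with the usual nr_to_reclaim of
 * SWAP_CLUSTER_MAX (32) the flusher threads are woken once more than 48
 * pages have been scanned in total.
 */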
2117
2118unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2119                                gfp_t gfp_mask, nodemask_t *nodemask)
2120{
2121        unsigned long nr_reclaimed;
2122        struct scan_control sc = {
2123                .gfp_mask = gfp_mask,
2124                .may_writepage = !laptop_mode,
2125                .nr_to_reclaim = SWAP_CLUSTER_MAX,
2126                .may_unmap = 1,
2127                .may_swap = 1,
2128                .swappiness = vm_swappiness,
2129                .order = order,
2130                .mem_cgroup = NULL,
2131                .nodemask = nodemask,
2132        };
2133
2134        trace_mm_vmscan_direct_reclaim_begin(order,
2135                                sc.may_writepage,
2136                                gfp_mask);
2137
2138        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2139
2140        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2141
2142        return nr_reclaimed;
2143}
2144
2145#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2146
2147unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2148                                                gfp_t gfp_mask, bool noswap,
2149                                                unsigned int swappiness,
2150                                                struct zone *zone)
2151{
2152        struct scan_control sc = {
2153                .nr_to_reclaim = SWAP_CLUSTER_MAX,
2154                .may_writepage = !laptop_mode,
2155                .may_unmap = 1,
2156                .may_swap = !noswap,
2157                .swappiness = swappiness,
2158                .order = 0,
2159                .mem_cgroup = mem,
2160        };
2161        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2162                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2163
2164        trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
2165                                                      sc.may_writepage,
2166                                                      sc.gfp_mask);
2167
2168        /*
2169         * NOTE: Although we can get the priority field, using it
2170         * here is not a good idea, since it limits the pages we can scan.
2171         * If we don't reclaim here, the shrink_zone from balance_pgdat
2172         * will pick up pages from other mem cgroups as well. We hack
2173         * the priority and make it zero.
2174         */
2175        shrink_zone(0, zone, &sc);
2176
2177        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2178
2179        return sc.nr_reclaimed;
2180}
2181
2182unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2183                                           gfp_t gfp_mask,
2184                                           bool noswap,
2185                                           unsigned int swappiness)
2186{
2187        struct zonelist *zonelist;
2188        unsigned long nr_reclaimed;
2189        struct scan_control sc = {
2190                .may_writepage = !laptop_mode,
2191                .may_unmap = 1,
2192                .may_swap = !noswap,
2193                .nr_to_reclaim = SWAP_CLUSTER_MAX,
2194                .swappiness = swappiness,
2195                .order = 0,
2196                .mem_cgroup = mem_cont,
2197                .nodemask = NULL, /* we don't care about placement */
2198        };
2199
2200        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2201                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2202        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
2203
2204        trace_mm_vmscan_memcg_reclaim_begin(0,
2205                                            sc.may_writepage,
2206                                            sc.gfp_mask);
2207
2208        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2209
2210        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2211
2212        return nr_reclaimed;
2213}
2214#endif
2215
2216/*
2217 * pgdat_balanced is used when checking if a node is balanced for high-order
2218 * allocations. Only zones that meet watermarks and are in a zone allowed
2219 * by the caller's classzone_idx are added to balanced_pages. The total of
2220 * balanced pages must be at least 25% of the zones allowed by classzone_idx
2221 * for the node to be considered balanced. Forcing all zones to be balanced
2222 * for high orders can cause excessive reclaim when there are imbalanced zones.
2223 * The choice of 25% is due to
2224 *   o a 16M DMA zone that is balanced will not balance a zone on any
2225 *     reasonably sized machine
2226 *   o On all other machines, the top zone must be at least a reasonable
2227 *     percentage of the middle zones. For example, on 32-bit x86, highmem
2228 *     would need to be at least 256M for it to balance a whole node.
2229 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
2230 *     to balance a node on its own. These seemed like reasonable ratios.
2231 */
2232static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2233                                                int classzone_idx)
2234{
2235        unsigned long present_pages = 0;
2236        int i;
2237
2238        for (i = 0; i <= classzone_idx; i++)
2239                present_pages += pgdat->node_zones[i].present_pages;
2240
2241        return balanced_pages > (present_pages >> 2);
2242}
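/*
 * Example of the 25% rule in pgdat_balanced(): if the zones up to and
 * including classzone_idx hold 4000000 pages in total, the node counts as
 * balanced once more than 1000000 of those pages (present_pages >> 2) sit in
 * zones whose high watermark is met.
 */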
2243
2244/* is kswapd sleeping prematurely? */
2245static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2246                                        int classzone_idx)
2247{
2248        int i;
2249        unsigned long balanced = 0;
2250        bool all_zones_ok = true;
2251
2252        /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2253        if (remaining)
2254                return true;
2255
2256        /* Check the watermark levels */
2257        for (i = 0; i < pgdat->nr_zones; i++) {
2258                struct zone *zone = pgdat->node_zones + i;
2259
2260                if (!populated_zone(zone))
2261                        continue;
2262
2263                /*
2264                 * balance_pgdat() skips over all_unreclaimable after
2265                 * DEF_PRIORITY. Effectively, it considers them balanced so
2266                 * they must be considered balanced here as well if kswapd
2267                 * is to sleep
2268                 */
2269                if (zone->all_unreclaimable) {
2270                        balanced += zone->present_pages;
2271                        continue;
2272                }
2273
2274                if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
2275                                                        classzone_idx, 0))
2276                        all_zones_ok = false;
2277                else
2278                        balanced += zone->present_pages;
2279        }
2280
2281        /*
2282         * For high-order requests, the balanced zones must contain at least
2283         * 25% of the node's pages for kswapd to sleep. For order-0, all zones
2284         * must be balanced
2285         */
2286        if (order)
2287                return pgdat_balanced(pgdat, balanced, classzone_idx);
2288        else
2289                return !all_zones_ok;
2290}
2291
2292/*
2293 * For kswapd, balance_pgdat() will work across all this node's zones until
2294 * they are all at high_wmark_pages(zone).
2295 *
2296 * Returns the final order kswapd was reclaiming at
2297 *
2298 * There is special handling here for zones which are full of pinned pages.
2299 * This can happen if the pages are all mlocked, or if they are all used by
2300 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
2301 * What we do is to detect the case where all pages in the zone have been
2302 * scanned twice and there has been zero successful reclaim.  Mark the zone as
2303 * dead and from now on, only perform a short scan.  Basically we're polling
2304 * the zone for when the problem goes away.
2305 *
2306 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
2307 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2308 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2309 * lower zones regardless of the number of free pages in the lower zones. This
2310 * interoperates with the page allocator fallback scheme to ensure that aging
2311 * of pages is balanced across the zones.
2312 */
2313static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2314                                                        int *classzone_idx)
2315{
2316        int all_zones_ok;
2317        unsigned long balanced;
2318        int priority;
2319        int i;
2320        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
2321        unsigned long total_scanned;
2322        struct reclaim_state *reclaim_state = current->reclaim_state;
2323        struct scan_control sc = {
2324                .gfp_mask = GFP_KERNEL,
2325                .may_unmap = 1,
2326                .may_swap = 1,
2327                /*
2328                 * kswapd doesn't want to be bailed out while reclaiming, because
2329                 * we want to put equal scanning pressure on each zone.
2330                 */
2331                .nr_to_reclaim = ULONG_MAX,
2332                .swappiness = vm_swappiness,
2333                .order = order,
2334                .mem_cgroup = NULL,
2335        };
2336loop_again:
2337        total_scanned = 0;
2338        sc.nr_reclaimed = 0;
2339        sc.may_writepage = !laptop_mode;
2340        count_vm_event(PAGEOUTRUN);
2341
2342        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2343                unsigned long lru_pages = 0;
2344                int has_under_min_watermark_zone = 0;
2345
2346                /* The swap token gets in the way of swapout... */
2347                if (!priority)
2348                        disable_swap_token();
2349
2350                all_zones_ok = 1;
2351                balanced = 0;
2352
2353                /*
2354                 * Scan in the highmem->dma direction for the highest
2355                 * zone which needs scanning
2356                 */
2357                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2358                        struct zone *zone = pgdat->node_zones + i;
2359
2360                        if (!populated_zone(zone))
2361                                continue;
2362
2363                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2364                                continue;
2365
2366                        /*
2367                         * Do some background aging of the anon list, to give
2368                         * pages a chance to be referenced before reclaiming.
2369                         */
2370                        if (inactive_anon_is_low(zone, &sc))
2371                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
2372                                                        &sc, priority, 0);
2373
2374                        if (!zone_watermark_ok_safe(zone, order,
2375                                        high_wmark_pages(zone), 0, 0)) {
2376                                end_zone = i;
2377                                *classzone_idx = i;
2378                                break;
2379                        }
2380                }
2381                if (i < 0)
2382                        goto out;
2383
2384                for (i = 0; i <= end_zone; i++) {
2385                        struct zone *zone = pgdat->node_zones + i;
2386
2387                        lru_pages += zone_reclaimable_pages(zone);
2388                }
2389
2390                /*
2391                 * Now scan the zone in the dma->highmem direction, stopping
2392                 * at the last zone which needs scanning.
2393                 *
2394                 * We do this because the page allocator works in the opposite
2395                 * direction.  This prevents the page allocator from allocating
2396                 * pages behind kswapd's direction of progress, which would
2397                 * cause too much scanning of the lower zones.
2398                 */
2399                for (i = 0; i <= end_zone; i++) {
2400                        int compaction;
2401                        struct zone *zone = pgdat->node_zones + i;
2402                        int nr_slab;
2403
2404                        if (!populated_zone(zone))
2405                                continue;
2406
2407                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2408                                continue;
2409
2410                        sc.nr_scanned = 0;
2411
2412                        /*
2413                         * Call soft limit reclaim before calling shrink_zone.
2414                         * For now we ignore the return value
2415                         */
2416                        mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
2417
2418                        /*
2419                         * We put equal pressure on every zone, unless one
2420                         * zone has way too many pages free already.
2421                         */
2422                        if (!zone_watermark_ok_safe(zone, order,
2423                                        8*high_wmark_pages(zone), end_zone, 0))
2424                                shrink_zone(priority, zone, &sc);
2425                        reclaim_state->reclaimed_slab = 0;
2426                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
2427                                                lru_pages);
2428                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2429                        total_scanned += sc.nr_scanned;
2430
2431                        compaction = 0;
2432                        if (order &&
2433                            zone_watermark_ok(zone, 0,
2434                                               high_wmark_pages(zone),
2435                                              end_zone, 0) &&
2436                            !zone_watermark_ok(zone, order,
2437                                               high_wmark_pages(zone),
2438                                               end_zone, 0)) {
2439                                compact_zone_order(zone,
2440                                                   order,
2441                                                   sc.gfp_mask, false,
2442                                                   COMPACT_MODE_KSWAPD);
2443                                compaction = 1;
2444                        }
2445
2446                        if (zone->all_unreclaimable)
2447                                continue;
2448                        if (!compaction && nr_slab == 0 &&
2449                            !zone_reclaimable(zone))
2450                                zone->all_unreclaimable = 1;
2451                        /*
2452                         * If we've done a decent amount of scanning and
2453                         * the reclaim ratio is low, start doing writepage
2454                         * even in laptop mode
2455                         */
2456                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
2457                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
2458                                sc.may_writepage = 1;
2459
2460                        if (!zone_watermark_ok_safe(zone, order,
2461                                        high_wmark_pages(zone), end_zone, 0)) {
2462                                all_zones_ok = 0;
2463                                /*
2464                                 * We are still under the min watermark.  This
2465                                 * means that we have a GFP_ATOMIC allocation
2466                                 * failure risk. Hurry up!
2467                                 */
2468                                if (!zone_watermark_ok_safe(zone, order,
2469                                            min_wmark_pages(zone), end_zone, 0))
2470                                        has_under_min_watermark_zone = 1;
2471                        } else {
2472                                /*
2473                                 * If a zone reaches its high watermark,
2474                                 * consider it to be no longer congested. It's
2475                                 * possible there are dirty pages backed by
2476                                 * congested BDIs but as pressure is relieved,
2477                                 * speculatively avoid congestion waits
2478                                 */
2479                                zone_clear_flag(zone, ZONE_CONGESTED);
2480                                if (i <= *classzone_idx)
2481                                        balanced += zone->present_pages;
2482                        }
2483
2484                }
2485                if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
2486                        break;          /* kswapd: all done */
2487                /*
2488                 * OK, kswapd is getting into trouble.  Take a nap, then take
2489                 * another pass across the zones.
2490                 */
2491                if (total_scanned && (priority < DEF_PRIORITY - 2)) {
2492                        if (has_under_min_watermark_zone)
2493                                count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2494                        else
2495                                congestion_wait(BLK_RW_ASYNC, HZ/10);
2496                }
2497
2498                /*
2499                 * We do this so kswapd doesn't build up large priorities for
2500                 * example when it is freeing in parallel with allocators. It
2501                 * matches the direct reclaim path behaviour in terms of impact
2502                 * on zone->*_priority.
2503                 */
2504                if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2505                        break;
2506        }
2507out:
2508
2509        /*
2510         * order-0: All zones must meet high watermark for a balanced node
2511         * high-order: Balanced zones must make up at least 25% of the node
2512         *             for the node to be balanced
2513         */
2514        if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
2515                cond_resched();
2516
2517                try_to_freeze();
2518
2519                /*
2520                 * Fragmentation may mean that the system cannot be
2521                 * rebalanced for high-order allocations in all zones.
2522                 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2523                 * it means the zones have been fully scanned and are still
2524                 * not balanced. For high-order allocations, there is
2525                 * little point trying all over again, as kswapd may
2526                 * loop indefinitely.
2527                 *
2528                 * Instead, recheck all watermarks at order-0 as they
2529                 * are the most important. If watermarks are ok, kswapd will go
2530                 * back to sleep. High-order users can still perform direct
2531                 * reclaim if they wish.
2532                 */
2533                if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2534                        order = sc.order = 0;
2535
2536                goto loop_again;
2537        }
2538
2539        /*
2540         * If kswapd was reclaiming at a higher order, it has the option of
2541         * sleeping without all zones being balanced. Before it does, it must
2542         * ensure that the watermarks for order-0 on *all* zones are met and
2543         * that the congestion flags are cleared. The congestion flag must
2544         * be cleared as kswapd is the only mechanism that clears the flag
2545         * and it is potentially going to sleep here.
2546         */
2547        if (order) {
2548                for (i = 0; i <= end_zone; i++) {
2549                        struct zone *zone = pgdat->node_zones + i;
2550
2551                        if (!populated_zone(zone))
2552                                continue;
2553
2554                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
2555                                continue;
2556
2557                        /* Confirm the zone is balanced for order-0 */
2558                        if (!zone_watermark_ok(zone, 0,
2559                                        high_wmark_pages(zone), 0, 0)) {
2560                                order = sc.order = 0;
2561                                goto loop_again;
2562                        }
2563
2564                        /* If balanced, clear the congested flag */
2565                        zone_clear_flag(zone, ZONE_CONGESTED);
2566                }
2567        }
2568
2569        /*
2570         * Return the order we were reclaiming at so that sleeping_prematurely()
2571         * can make its decision on that order. However,
2572         * if another caller entered the allocator slow path while kswapd
2573         * was awake, order will remain at the higher level
2574         */
2575        *classzone_idx = end_zone;
2576        return order;
2577}
2578
2579static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
2580{
2581        long remaining = 0;
2582        DEFINE_WAIT(wait);
2583
2584        if (freezing(current) || kthread_should_stop())
2585                return;
2586
2587        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2588
2589        /* Try to sleep for a short interval */
2590        if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2591                remaining = schedule_timeout(HZ/10);
2592                finish_wait(&pgdat->kswapd_wait, &wait);
2593                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2594        }
2595
2596        /*
2597         * After a short sleep, check if it was a premature sleep. If not, then
2598         * go fully to sleep until explicitly woken up.
2599         */
2600        if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
2601                trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2602
2603                /*
2604                 * vmstat counters are not perfectly accurate and the estimated
2605                 * value for counters such as NR_FREE_PAGES can deviate from the
2606                 * true value by nr_online_cpus * threshold. To avoid the zone
2607                 * watermarks being breached while under pressure, we reduce the
2608                 * per-cpu vmstat threshold while kswapd is awake and restore
2609                 * them before going back to sleep.
2610                 */
2611                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
2612                schedule();
2613                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2614        } else {
2615                if (remaining)
2616                        count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2617                else
2618                        count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2619        }
2620        finish_wait(&pgdat->kswapd_wait, &wait);
2621}
2622
2623/*
2624 * The background pageout daemon, started as a kernel thread
2625 * from the init process.
2626 *
2627 * This basically trickles out pages so that we have _some_
2628 * free memory available even if there is no other activity
2629 * that frees anything up. This is needed for things like routing
2630 * etc, where we otherwise might have all activity going on in
2631 * asynchronous contexts that cannot page things out.
2632 *
2633 * If there are applications that are active memory-allocators
2634 * (most normal use), this basically shouldn't matter.
2635 */
2636static int kswapd(void *p)
2637{
2638        unsigned long order;
2639        int classzone_idx;
2640        pg_data_t *pgdat = (pg_data_t*)p;
2641        struct task_struct *tsk = current;
2642
2643        struct reclaim_state reclaim_state = {
2644                .reclaimed_slab = 0,
2645        };
2646        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2647
2648        lockdep_set_current_reclaim_state(GFP_KERNEL);
2649
2650        if (!cpumask_empty(cpumask))
2651                set_cpus_allowed_ptr(tsk, cpumask);
2652        current->reclaim_state = &reclaim_state;
2653
2654        /*
2655         * Tell the memory management that we're a "memory allocator",
2656         * and that if we need more memory we should get access to it
2657         * regardless (see "__alloc_pages()"). "kswapd" should
2658         * never get caught in the normal page freeing logic.
2659         *
2660         * (Kswapd normally doesn't need memory anyway, but sometimes
2661         * you need a small amount of memory in order to be able to
2662         * page out something else, and this flag essentially protects
2663         * us from recursively trying to free more memory as we're
2664         * trying to free the first piece of memory in the first place).
2665         */
2666        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2667        set_freezable();
2668
2669        order = 0;
2670        classzone_idx = MAX_NR_ZONES - 1;
2671        for ( ; ; ) {
2672                unsigned long new_order;
2673                int new_classzone_idx;
2674                int ret;
2675
2676                new_order = pgdat->kswapd_max_order;
2677                new_classzone_idx = pgdat->classzone_idx;
2678                pgdat->kswapd_max_order = 0;
2679                pgdat->classzone_idx = MAX_NR_ZONES - 1;
2680                if (order < new_order || classzone_idx > new_classzone_idx) {
2681                        /*
2682                         * Don't sleep if someone wants a larger 'order'
2683                         * allocation or has tighter zone constraints
2684                         */
2685                        order = new_order;
2686                        classzone_idx = new_classzone_idx;
2687                } else {
2688                        kswapd_try_to_sleep(pgdat, order, classzone_idx);
2689                        order = pgdat->kswapd_max_order;
2690                        classzone_idx = pgdat->classzone_idx;
2691                        pgdat->kswapd_max_order = 0;
2692                        pgdat->classzone_idx = MAX_NR_ZONES - 1;
2693                }
2694
2695                ret = try_to_freeze();
2696                if (kthread_should_stop())
2697                        break;
2698
2699                /*
2700                 * We can speed up thawing tasks if we don't call balance_pgdat
2701                 * after returning from the refrigerator
2702                 */
2703                if (!ret) {
2704                        trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
2705                        order = balance_pgdat(pgdat, order, &classzone_idx);
2706                }
2707        }
2708        return 0;
2709}
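/*
 * Editorial example (not part of the upstream file) of the "don't sleep"
 * test in the loop above.  Suppose kswapd last balanced for order = 0 with
 * classzone_idx = ZONE_NORMAL, and a waker has since recorded
 * kswapd_max_order = 3 with classzone_idx = ZONE_DMA32.  Then both
 * order < new_order (0 < 3) and classzone_idx > new_classzone_idx
 * (ZONE_NORMAL > ZONE_DMA32) hold, so kswapd skips the sleep and
 * immediately rebalances for the harder request rather than napping on
 * stale targets.
 */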
2710
2711/*
2712 * A zone is low on free memory, so wake its kswapd task to service it.
2713 */
2714void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
2715{
2716        pg_data_t *pgdat;
2717
2718        if (!populated_zone(zone))
2719                return;
2720
2721        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2722                return;
2723        pgdat = zone->zone_pgdat;
2724        if (pgdat->kswapd_max_order < order) {
2725                pgdat->kswapd_max_order = order;
2726                pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
2727        }
2728        if (!waitqueue_active(&pgdat->kswapd_wait))
2729                return;
2730        if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
2731                return;
2732
2733        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
2734        wake_up_interruptible(&pgdat->kswapd_wait);
2735}
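/*
 * Editorial sketch (not part of the upstream file): roughly how the page
 * allocator's slow path in mm/page_alloc.c uses wakeup_kswapd().  The
 * function name below is illustrative only.
 */
static inline void example_wake_all_kswapds(unsigned int order,
                                            struct zonelist *zonelist,
                                            enum zone_type high_zoneidx,
                                            enum zone_type classzone_idx)
{
        struct zoneref *z;
        struct zone *zone;

        /* Kick kswapd for every zone the allocation could be satisfied from */
        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                wakeup_kswapd(zone, order, classzone_idx);
}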
2736
2737/*
2738 * The reclaimable count should be mostly accurate.
2739 * The less readily reclaimable pages are:
2740 * - mlocked pages, which will be moved to the unevictable list when encountered
2741 * - mapped pages, which may require several scan passes before they are reclaimed
2742 * - dirty pages, which are not "instantly" reclaimable
2743 */
2744unsigned long global_reclaimable_pages(void)
2745{
2746        unsigned long nr;
2747
2748        nr = global_page_state(NR_ACTIVE_FILE) +
2749             global_page_state(NR_INACTIVE_FILE);
2750
2751        if (nr_swap_pages > 0)
2752                nr += global_page_state(NR_ACTIVE_ANON) +
2753                      global_page_state(NR_INACTIVE_ANON);
2754
2755        return nr;
2756}
2757
2758unsigned long zone_reclaimable_pages(struct zone *zone)
2759{
2760        unsigned long nr;
2761
2762        nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2763             zone_page_state(zone, NR_INACTIVE_FILE);
2764
2765        if (nr_swap_pages > 0)
2766                nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2767                      zone_page_state(zone, NR_INACTIVE_ANON);
2768
2769        return nr;
2770}
2771
2772#ifdef CONFIG_HIBERNATION
2773/*
2774 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
2775 * freed pages.
2776 *
2777 * Rather than trying to age LRUs, the aim is to preserve the overall
2778 * LRU order by reclaiming preferentially
2779 * inactive > active > active referenced > active mapped
2780 */
2781unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2782{
2783        struct reclaim_state reclaim_state;
2784        struct scan_control sc = {
2785                .gfp_mask = GFP_HIGHUSER_MOVABLE,
2786                .may_swap = 1,
2787                .may_unmap = 1,
2788                .may_writepage = 1,
2789                .nr_to_reclaim = nr_to_reclaim,
2790                .hibernation_mode = 1,
2791                .swappiness = vm_swappiness,
2792                .order = 0,
2793        };
2794        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2795        struct task_struct *p = current;
2796        unsigned long nr_reclaimed;
2797
2798        p->flags |= PF_MEMALLOC;
2799        lockdep_set_current_reclaim_state(sc.gfp_mask);
2800        reclaim_state.reclaimed_slab = 0;
2801        p->reclaim_state = &reclaim_state;
2802
2803        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2804
2805        p->reclaim_state = NULL;
2806        lockdep_clear_current_reclaim_state();
2807        p->flags &= ~PF_MEMALLOC;
2808
2809        return nr_reclaimed;
2810}
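/*
 * Editorial note (not part of the upstream file): shrink_all_memory() is
 * called from the hibernation core (kernel/power/snapshot.c) while it
 * preallocates memory for the suspend image, asking for roughly however
 * many pages its image-size calculation still needs to free.
 */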
2811#endif /* CONFIG_HIBERNATION */
2812
2813/* It's optimal to keep kswapds on the same CPUs as their memory, but that
2814   is not required for correctness.  So if the last CPU in a node goes
2815   offline, kswapd is allowed to run anywhere; when the first CPU of that
2816   node comes back online, its CPU binding is restored. */
2817static int __devinit cpu_callback(struct notifier_block *nfb,
2818                                  unsigned long action, void *hcpu)
2819{
2820        int nid;
2821
2822        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
2823                for_each_node_state(nid, N_HIGH_MEMORY) {
2824                        pg_data_t *pgdat = NODE_DATA(nid);
2825                        const struct cpumask *mask;
2826
2827                        mask = cpumask_of_node(pgdat->node_id);
2828
2829                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2830                                /* One of our CPUs online: restore mask */
2831                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
2832                }
2833        }
2834        return NOTIFY_OK;
2835}
2836
2837/*
2838 * This kswapd start function will be called by init and node-hot-add.
2839 * On node hot-add, kswapd will be moved to the proper CPUs if CPUs are hot-added.
2840 */
2841int kswapd_run(int nid)
2842{
2843        pg_data_t *pgdat = NODE_DATA(nid);
2844        int ret = 0;
2845
2846        if (pgdat->kswapd)
2847                return 0;
2848
2849        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
2850        if (IS_ERR(pgdat->kswapd)) {
2851                /* failure at boot is fatal */
2852                BUG_ON(system_state == SYSTEM_BOOTING);
2853                printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
2854                ret = -1;
2855        }
2856        return ret;
2857}
2858
2859/*
2860 * Called by memory hotplug when all memory in a node is offlined.
2861 */
2862void kswapd_stop(int nid)
2863{
2864        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
2865
2866        if (kswapd)
2867                kthread_stop(kswapd);
2868}
2869
2870static int __init kswapd_init(void)
2871{
2872        int nid;
2873
2874        swap_setup();
2875        for_each_node_state(nid, N_HIGH_MEMORY)
2876                kswapd_run(nid);
2877        hotcpu_notifier(cpu_callback, 0);
2878        return 0;
2879}
2880
2881module_init(kswapd_init)
2882
2883#ifdef CONFIG_NUMA
2884/*
2885 * Zone reclaim mode
2886 *
2887 * If non-zero call zone_reclaim when the number of free pages falls below
2888 * the watermarks.
2889 */
2890int zone_reclaim_mode __read_mostly;
2891
2892#define RECLAIM_OFF 0
2893#define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
2894#define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
2895#define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
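/*
 * Editorial example (not part of the upstream file): these bits are exposed
 * through the vm.zone_reclaim_mode sysctl and may be combined, e.g.:
 *
 *      echo 0 > /proc/sys/vm/zone_reclaim_mode    # zone reclaim off
 *      echo 1 > /proc/sys/vm/zone_reclaim_mode    # RECLAIM_ZONE
 *      echo 3 > /proc/sys/vm/zone_reclaim_mode    # RECLAIM_ZONE | RECLAIM_WRITE
 *      echo 5 > /proc/sys/vm/zone_reclaim_mode    # RECLAIM_ZONE | RECLAIM_SWAP
 */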
2896
2897/*
2898 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
2899 * pages considered in each zone_reclaim pass. A starting priority of 4
2900 * scans 1/16th of the zone.
2901 */
2902#define ZONE_RECLAIM_PRIORITY 4
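/*
 * Editorial note (not part of the upstream file): the scan target at a given
 * priority is roughly (LRU size) >> priority, so a starting priority of 4
 * looks at about 1/16th of the zone.  __zone_reclaim() below retries at
 * progressively lower priorities (4, 3, 2, 1, 0, where 0 means the whole
 * zone) until enough pages have been reclaimed.
 */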
2903
2904/*
2905 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
2906 * occur.
2907 */
2908int sysctl_min_unmapped_ratio = 1;
2909
2910/*
2911 * If the number of slab pages in a zone grows beyond this percentage then
2912 * slab reclaim needs to occur.
2913 */
2914int sysctl_min_slab_ratio = 5;
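/*
 * Editorial example (not part of the upstream file): both ratios are
 * runtime tunables interpreted as a percentage of the zone, e.g.:
 *
 *      echo 10 > /proc/sys/vm/min_unmapped_ratio  # require 10% unmapped file pages
 *      echo 5  > /proc/sys/vm/min_slab_ratio      # reclaim slab above 5% of the zone
 */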
2915
2916static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
2917{
2918        unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
2919        unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
2920                zone_page_state(zone, NR_ACTIVE_FILE);
2921
2922        /*
2923         * It's possible for there to be more file mapped pages than
2924         * accounted for by the pages on the file LRU lists because
2925         * tmpfs pages accounted for as ANON can also be FILE_MAPPED
2926         */
2927        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
2928}
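/*
 * Editorial example (not part of the upstream file): if a zone's file LRUs
 * hold 10000 pages while NR_FILE_MAPPED reports 6000, the helper above
 * treats roughly 4000 pages as unmapped page cache.  When tmpfs inflates
 * NR_FILE_MAPPED beyond the file LRU total, the result is clamped to 0
 * rather than being allowed to underflow.
 */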
2929
2930/* Work out how many page cache pages we can reclaim in this reclaim_mode */
2931static long zone_pagecache_reclaimable(struct zone *zone)
2932{
2933        long nr_pagecache_reclaimable;
2934        long delta = 0;
2935
2936        /*
2937         * If RECLAIM_SWAP is set, then all file pages are considered
2938         * potentially reclaimable. Otherwise, we have to worry about
2939         * pages like swapcache, and zone_unmapped_file_pages() provides
2940         * a better estimate.
2941         */
2942        if (zone_reclaim_mode & RECLAIM_SWAP)
2943                nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
2944        else
2945                nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
2946
2947        /* If we can't clean pages, remove dirty pages from consideration */
2948        if (!(zone_reclaim_mode & RECLAIM_WRITE))
2949                delta += zone_page_state(zone, NR_FILE_DIRTY);
2950
2951        /* Watch for any possible underflows due to delta */
2952        if (unlikely(delta > nr_pagecache_reclaimable))
2953                delta = nr_pagecache_reclaimable;
2954
2955        return nr_pagecache_reclaimable - delta;
2956}
2957
2958/*
2959 * Try to free up some pages from this zone through reclaim.
2960 */
2961static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2962{
2963        /* Minimum pages needed in order to stay on node */
2964        const unsigned long nr_pages = 1 << order;
2965        struct task_struct *p = current;
2966        struct reclaim_state reclaim_state;
2967        int priority;
2968        struct scan_control sc = {
2969                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2970                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2971                .may_swap = 1,
2972                .nr_to_reclaim = max_t(unsigned long, nr_pages,
2973                                       SWAP_CLUSTER_MAX),
2974                .gfp_mask = gfp_mask,
2975                .swappiness = vm_swappiness,
2976                .order = order,
2977        };
2978        unsigned long nr_slab_pages0, nr_slab_pages1;
2979
2980        cond_resched();
2981        /*
2982         * We need to be able to allocate from the reserves for RECLAIM_SWAP
2983         * and we also need to be able to write out pages for RECLAIM_WRITE
2984         * and RECLAIM_SWAP.
2985         */
2986        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2987        lockdep_set_current_reclaim_state(gfp_mask);
2988        reclaim_state.reclaimed_slab = 0;
2989        p->reclaim_state = &reclaim_state;
2990
2991        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
2992                /*
2993                 * Free memory by calling shrink zone with increasing
2994                 * priorities until we have enough memory freed.
2995                 */
2996                priority = ZONE_RECLAIM_PRIORITY;
2997                do {
2998                        shrink_zone(priority, zone, &sc);
2999                        priority--;
3000                } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
3001        }
3002
3003        nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3004        if (nr_slab_pages0 > zone->min_slab_pages) {
3005                /*
3006                 * shrink_slab() does not currently allow us to determine how
3007                 * many pages were freed in this zone. So we take the current
3008                 * number of slab pages and shake the slab until it is reduced
3009                 * by the same nr_pages that we used for reclaiming unmapped
3010                 * pages.
3011                 *
3012                 * Note that shrink_slab will free memory on all zones and may
3013                 * take a long time.
3014                 */
3015                for (;;) {
3016                        unsigned long lru_pages = zone_reclaimable_pages(zone);
3017
3018                        /* No reclaimable slab or very low memory pressure */
3019                        if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
3020                                break;
3021
3022                        /* Freed enough memory */
3023                        nr_slab_pages1 = zone_page_state(zone,
3024                                                        NR_SLAB_RECLAIMABLE);
3025                        if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3026                                break;
3027                }
3028
3029                /*
3030                 * Update nr_reclaimed by the number of slab pages we
3031                 * reclaimed from this zone.
3032                 */
3033                nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3034                if (nr_slab_pages1 < nr_slab_pages0)
3035                        sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
3036        }
3037
3038        p->reclaim_state = NULL;
3039        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
3040        lockdep_clear_current_reclaim_state();
3041        return sc.nr_reclaimed >= nr_pages;
3042}
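/*
 * Editorial example (not part of the upstream file): suppose an order-3
 * request means nr_pages = 8 and the zone starts with nr_slab_pages0 = 5000
 * reclaimable slab pages.  The slab loop above keeps calling shrink_slab()
 * until either NR_SLAB_RECLAIMABLE drops to 4992 or below, or shrink_slab()
 * returns 0 because nothing more can be freed; the pages actually shed are
 * then credited to sc.nr_reclaimed.
 */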
3043
3044int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3045{
3046        int node_id;
3047        int ret;
3048
3049        /*
3050         * Zone reclaim reclaims unmapped file backed pages and
3051         * slab pages if we are over the defined limits.
3052         *
3053         * A small portion of unmapped file backed pages is needed for
3054         * file I/O otherwise pages read by file I/O will be immediately
3055         * thrown out if the zone is overallocated. So we do not reclaim
3056         * if less than a specified percentage of the zone is used by
3057         * unmapped file backed pages.
3058         */
3059        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3060            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
3061                return ZONE_RECLAIM_FULL;
3062
3063        if (zone->all_unreclaimable)
3064                return ZONE_RECLAIM_FULL;
3065
3066        /*
3067         * Do not scan if the allocation should not be delayed.
3068         */
3069        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
3070                return ZONE_RECLAIM_NOSCAN;
3071
3072        /*
3073         * Only run zone reclaim on the local zone or on zones that do not
3074         * have associated processors. This will favor the local processor
3075         * over remote processors and spread off-node memory allocations
3076         * as widely as possible.
3077         */
3078        node_id = zone_to_nid(zone);
3079        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
3080                return ZONE_RECLAIM_NOSCAN;
3081
3082        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
3083                return ZONE_RECLAIM_NOSCAN;
3084
3085        ret = __zone_reclaim(zone, gfp_mask, order);
3086        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3087
3088        if (!ret)
3089                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3090
3091        return ret;
3092}
3093#endif
3094
3095/*
3096 * page_evictable - test whether a page is evictable
3097 * @page: the page to test
3098 * @vma: the VMA in which the page is or will be mapped, may be NULL
3099 *
3100 * Test whether page is evictable--i.e., should be placed on active/inactive
3101 * lists vs unevictable list.  The vma argument is !NULL when called from the
3102 * fault path to determine how to instantiate a new page.
3103 *
3104 * Reasons page might not be evictable:
3105 * (1) page's mapping marked unevictable
3106 * (2) page is part of an mlocked VMA
3107 *
3108 */
3109int page_evictable(struct page *page, struct vm_area_struct *vma)
3110{
3111
3112        if (mapping_unevictable(page_mapping(page)))
3113                return 0;
3114
3115        if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
3116                return 0;
3117
3118        return 1;
3119}
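/*
 * Editorial example (not part of the upstream file): reason (1) above is
 * hit when, for instance, shmem marks a SysV SHM segment's mapping
 * unevictable in response to SHM_LOCK; reason (2) covers pages mapped into
 * a VMA that has VM_LOCKED set via mlock()/mlockall().
 */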
3120
3121/**
3122 * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
3123 * @page: page to check evictability and move to appropriate lru list
3124 * @zone: zone page is in
3125 *
3126 * Checks a page for evictability and moves the page to the appropriate
3127 * zone lru list.
3128 *
3129 * Restrictions: zone->lru_lock must be held, page must be on LRU and must
3130 * have PageUnevictable set.
3131 */
3132static void check_move_unevictable_page(struct page *page, struct zone *zone)
3133{
3134        VM_BUG_ON(PageActive(page));
3135
3136retry:
3137        ClearPageUnevictable(page);
3138        if (page_evictable(page, NULL)) {
3139                enum lru_list l = page_lru_base_type(page);
3140
3141                __dec_zone_state(zone, NR_UNEVICTABLE);
3142                list_move(&page->lru, &zone->lru[l].list);
3143                mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
3144                __inc_zone_state(zone, NR_INACTIVE_ANON + l);
3145                __count_vm_event(UNEVICTABLE_PGRESCUED);
3146        } else {
3147                /*
3148                 * rotate unevictable list
3149                 */
3150                SetPageUnevictable(page);
3151                list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
3152                mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
3153                if (page_evictable(page, NULL))
3154                        goto retry;
3155        }
3156}
3157
3158/**
3159 * scan_mapping_unevictable_pages - scan an address space for evictable pages
3160 * @mapping: struct address_space to scan for evictable pages
3161 *
3162 * Scan all pages in mapping.  Check unevictable pages for
3163 * evictability and move them to the appropriate zone lru list.
3164 */
3165void scan_mapping_unevictable_pages(struct address_space *mapping)
3166{
3167        pgoff_t next = 0;
3168        pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
3169                         PAGE_CACHE_SHIFT;
3170        struct zone *zone;
3171        struct pagevec pvec;
3172
3173        if (mapping->nrpages == 0)
3174                return;
3175
3176        pagevec_init(&pvec, 0);
3177        while (next < end &&
3178                pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
3179                int i;
3180                int pg_scanned = 0;
3181
3182                zone = NULL;
3183
3184                for (i = 0; i < pagevec_count(&pvec); i++) {
3185                        struct page *page = pvec.pages[i];
3186                        pgoff_t page_index = page->index;
3187                        struct zone *pagezone = page_zone(page);
3188
3189                        pg_scanned++;
3190                        if (page_index > next)
3191                                next = page_index;
3192                        next++;
3193
3194                        if (pagezone != zone) {
3195                                if (zone)
3196                                        spin_unlock_irq(&zone->lru_lock);
3197                                zone = pagezone;
3198                                spin_lock_irq(&zone->lru_lock);
3199                        }
3200
3201                        if (PageLRU(page) && PageUnevictable(page))
3202                                check_move_unevictable_page(page, zone);
3203                }
3204                if (zone)
3205                        spin_unlock_irq(&zone->lru_lock);
3206                pagevec_release(&pvec);
3207
3208                count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
3209        }
3210
3211}
3212
3213/**
3214 * scan_zone_unevictable_pages - check unevictable list for evictable pages
3215 * @zone: the zone whose unevictable list is to be scanned
3216 *
3217 * Scan @zone's unevictable LRU lists to check for pages that have become
3218 * evictable.  Move those that have to @zone's inactive list where they
3219 * become candidates for reclaim, unless shrink_inactive_list() decides
3220 * to reactivate them.  Pages that are still unevictable are rotated
3221 * back onto @zone's unevictable list.
3222 */
3223#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
3224static void scan_zone_unevictable_pages(struct zone *zone)
3225{
3226        struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
3227        unsigned long scan;
3228        unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
3229
3230        while (nr_to_scan > 0) {
3231                unsigned long batch_size = min(nr_to_scan,
3232                                                SCAN_UNEVICTABLE_BATCH_SIZE);
3233
3234                spin_lock_irq(&zone->lru_lock);
3235                for (scan = 0;  scan < batch_size; scan++) {
3236                        struct page *page = lru_to_page(l_unevictable);
3237
3238                        if (!trylock_page(page))
3239                                continue;
3240
3241                        prefetchw_prev_lru_page(page, l_unevictable, flags);
3242
3243                        if (likely(PageLRU(page) && PageUnevictable(page)))
3244                                check_move_unevictable_page(page, zone);
3245
3246                        unlock_page(page);
3247                }
3248                spin_unlock_irq(&zone->lru_lock);
3249
3250                nr_to_scan -= batch_size;
3251        }
3252}
3253
3254
3255/**
3256 * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
3257 *
3258 * A really big hammer:  scan all zones' unevictable LRU lists to check for
3259 * pages that have become evictable.  Move those back to the zones'
3260 * inactive list where they become candidates for reclaim.
3261 * This occurs when, e.g., we have unswappable pages on the unevictable lists,
3262 * and we add swap to the system.  As such, it runs in the context of a task
3263 * that has possibly/probably made some previously unevictable pages
3264 * evictable.
3265 */
3266static void scan_all_zones_unevictable_pages(void)
3267{
3268        struct zone *zone;
3269
3270        for_each_zone(zone) {
3271                scan_zone_unevictable_pages(zone);
3272        }
3273}
3274
3275/*
3276 * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
3277 * all nodes' unevictable lists for evictable pages
3278 */
3279unsigned long scan_unevictable_pages;
3280
3281int scan_unevictable_handler(struct ctl_table *table, int write,
3282                           void __user *buffer,
3283                           size_t *length, loff_t *ppos)
3284{
3285        proc_doulongvec_minmax(table, write, buffer, length, ppos);
3286
3287        if (write && *(unsigned long *)table->data)
3288                scan_all_zones_unevictable_pages();
3289
3290        scan_unevictable_pages = 0;
3291        return 0;
3292}
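/*
 * Editorial example (not part of the upstream file): the handler above is
 * wired to the vm.scan_unevictable_pages sysctl, so a manual rescan of all
 * zones' unevictable lists can be requested with:
 *
 *      echo 1 > /proc/sys/vm/scan_unevictable_pages
 *
 * Reading the file always returns 0, because the value is reset once the
 * scan has been kicked off.
 */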
3293
3294#ifdef CONFIG_NUMA
3295/*
3296 * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
3297 * a specified node's per zone unevictable lists for evictable pages.
3298 */
3299
3300static ssize_t read_scan_unevictable_node(struct sys_device *dev,
3301                                          struct sysdev_attribute *attr,
3302                                          char *buf)
3303{
3304        return sprintf(buf, "0\n");     /* always zero; should fit... */
3305}
3306
3307static ssize_t write_scan_unevictable_node(struct sys_device *dev,
3308                                           struct sysdev_attribute *attr,
3309                                        const char *buf, size_t count)
3310{
3311        struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
3312        struct zone *zone;
3313        unsigned long res;
3314        int err = strict_strtoul(buf, 10, &res);
3315
3316        if (err || !res)
3317                return 1;       /* invalid input or zero is a no-op */
3318
3319        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
3320                if (!populated_zone(zone))
3321                        continue;
3322                scan_zone_unevictable_pages(zone);
3323        }
3324        return 1;
3325}
3326
3327
3328static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
3329                        read_scan_unevictable_node,
3330                        write_scan_unevictable_node);
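/*
 * Editorial example (not part of the upstream file): the attribute above
 * shows up under the node's sysdev directory, so a single node can be
 * rescanned with, e.g.:
 *
 *      echo 1 > /sys/devices/system/node/node0/scan_unevictable_pages
 */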
3331
3332int scan_unevictable_register_node(struct node *node)
3333{
3334        return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
3335}
3336
3337void scan_unevictable_unregister_node(struct node *node)
3338{
3339        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
3340}
3341#endif
3342