linux/mm/vmscan.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/mm/vmscan.c
   4 *
   5 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6 *
   7 *  Swap reorganised 29.12.95, Stephen Tweedie.
   8 *  kswapd added: 7.1.96  sct
   9 *  Removed kswapd_ctl limits, and swap out as many pages as needed
  10 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  11 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  12 *  Multiqueue VM started 5.8.00, Rik van Riel.
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/mm.h>
  18#include <linux/sched/mm.h>
  19#include <linux/module.h>
  20#include <linux/gfp.h>
  21#include <linux/kernel_stat.h>
  22#include <linux/swap.h>
  23#include <linux/pagemap.h>
  24#include <linux/init.h>
  25#include <linux/highmem.h>
  26#include <linux/vmpressure.h>
  27#include <linux/vmstat.h>
  28#include <linux/file.h>
  29#include <linux/writeback.h>
  30#include <linux/blkdev.h>
  31#include <linux/buffer_head.h>  /* for try_to_release_page(),
  32                                        buffer_heads_over_limit */
  33#include <linux/mm_inline.h>
  34#include <linux/backing-dev.h>
  35#include <linux/rmap.h>
  36#include <linux/topology.h>
  37#include <linux/cpu.h>
  38#include <linux/cpuset.h>
  39#include <linux/compaction.h>
  40#include <linux/notifier.h>
  41#include <linux/rwsem.h>
  42#include <linux/delay.h>
  43#include <linux/kthread.h>
  44#include <linux/freezer.h>
  45#include <linux/memcontrol.h>
  46#include <linux/delayacct.h>
  47#include <linux/sysctl.h>
  48#include <linux/oom.h>
  49#include <linux/pagevec.h>
  50#include <linux/prefetch.h>
  51#include <linux/printk.h>
  52#include <linux/dax.h>
  53#include <linux/psi.h>
  54
  55#include <asm/tlbflush.h>
  56#include <asm/div64.h>
  57
  58#include <linux/swapops.h>
  59#include <linux/balloon_compaction.h>
  60
  61#include "internal.h"
  62
  63#define CREATE_TRACE_POINTS
  64#include <trace/events/vmscan.h>
  65
  66struct scan_control {
  67        /* How many pages shrink_list() should reclaim */
  68        unsigned long nr_to_reclaim;
  69
  70        /*
  71         * Nodemask of nodes allowed by the caller. If NULL, all nodes
  72         * are scanned.
  73         */
  74        nodemask_t      *nodemask;
  75
  76        /*
  77         * The memory cgroup that hit its limit and as a result is the
  78         * primary target of this reclaim invocation.
  79         */
  80        struct mem_cgroup *target_mem_cgroup;
  81
  82        /* Writepage batching in laptop mode; RECLAIM_WRITE */
  83        unsigned int may_writepage:1;
  84
  85        /* Can mapped pages be reclaimed? */
  86        unsigned int may_unmap:1;
  87
  88        /* Can pages be swapped as part of reclaim? */
  89        unsigned int may_swap:1;
  90
  91        /* e.g. boosted watermark reclaim leaves slabs alone */
  92        unsigned int may_shrinkslab:1;
  93
  94        /*
  95         * Cgroups are not reclaimed below their configured memory.low,
  96         * unless we threaten to OOM. If any cgroups are skipped due to
  97         * memory.low and nothing was reclaimed, go back for memory.low.
  98         */
  99        unsigned int memcg_low_reclaim:1;
 100        unsigned int memcg_low_skipped:1;
 101
 102        unsigned int hibernation_mode:1;
 103
 104        /* One of the zones is ready for compaction */
 105        unsigned int compaction_ready:1;
 106
 107        /* Allocation order */
 108        s8 order;
 109
 110        /* Scan (total_size >> priority) pages at once */
 111        s8 priority;
 112
 113        /* The highest zone to isolate pages for reclaim from */
 114        s8 reclaim_idx;
 115
 116        /* This context's GFP mask */
 117        gfp_t gfp_mask;
 118
 119        /* Incremented by the number of inactive pages that were scanned */
 120        unsigned long nr_scanned;
 121
 122        /* Number of pages freed so far during a call to shrink_zones() */
 123        unsigned long nr_reclaimed;
 124
 125        struct {
 126                unsigned int dirty;
 127                unsigned int unqueued_dirty;
 128                unsigned int congested;
 129                unsigned int writeback;
 130                unsigned int immediate;
 131                unsigned int file_taken;
 132                unsigned int taken;
 133        } nr;
 134};
 135
 136#ifdef ARCH_HAS_PREFETCH
 137#define prefetch_prev_lru_page(_page, _base, _field)                    \
 138        do {                                                            \
 139                if ((_page)->lru.prev != _base) {                       \
 140                        struct page *prev;                              \
 141                                                                        \
 142                        prev = lru_to_page(&(_page->lru));              \
 143                        prefetch(&prev->_field);                        \
 144                }                                                       \
 145        } while (0)
 146#else
 147#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
 148#endif
 149
 150#ifdef ARCH_HAS_PREFETCHW
 151#define prefetchw_prev_lru_page(_page, _base, _field)                   \
 152        do {                                                            \
 153                if ((_page)->lru.prev != _base) {                       \
 154                        struct page *prev;                              \
 155                                                                        \
 156                        prev = lru_to_page(&(_page->lru));              \
 157                        prefetchw(&prev->_field);                       \
 158                }                                                       \
 159        } while (0)
 160#else
 161#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
 162#endif
 163
 164/*
 165 * From 0 .. 100.  Higher means more swappy.
 166 */
 167int vm_swappiness = 60;
 168/*
 169 * The total number of pages which are beyond the high watermark within all
 170 * zones.
 171 */
 172unsigned long vm_total_pages;
 173
 174static LIST_HEAD(shrinker_list);
 175static DECLARE_RWSEM(shrinker_rwsem);
 176
 177#ifdef CONFIG_MEMCG_KMEM
 178
 179/*
 180 * We allow subsystems to populate their shrinker-related
 181 * LRU lists before register_shrinker_prepared() is called
 182 * for the shrinker, since we don't want to impose
 183 * restrictions on their internal registration order.
 184 * In this case shrink_slab_memcg() may find corresponding
 185 * bit is set in the shrinkers map.
 186 *
 187 * This value is used by the function to detect registering
 188 * shrinkers and to skip do_shrink_slab() calls for them.
 189 */
 190#define SHRINKER_REGISTERING ((struct shrinker *)~0UL)
 191
 192static DEFINE_IDR(shrinker_idr);
 193static int shrinker_nr_max;
 194
 195static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 196{
 197        int id, ret = -ENOMEM;
 198
 199        down_write(&shrinker_rwsem);
 200        /* This may call shrinker, so it must use down_read_trylock() */
 201        id = idr_alloc(&shrinker_idr, SHRINKER_REGISTERING, 0, 0, GFP_KERNEL);
 202        if (id < 0)
 203                goto unlock;
 204
 205        if (id >= shrinker_nr_max) {
 206                if (memcg_expand_shrinker_maps(id)) {
 207                        idr_remove(&shrinker_idr, id);
 208                        goto unlock;
 209                }
 210
 211                shrinker_nr_max = id + 1;
 212        }
 213        shrinker->id = id;
 214        ret = 0;
 215unlock:
 216        up_write(&shrinker_rwsem);
 217        return ret;
 218}
 219
 220static void unregister_memcg_shrinker(struct shrinker *shrinker)
 221{
 222        int id = shrinker->id;
 223
 224        BUG_ON(id < 0);
 225
 226        down_write(&shrinker_rwsem);
 227        idr_remove(&shrinker_idr, id);
 228        up_write(&shrinker_rwsem);
 229}
 230#else /* CONFIG_MEMCG_KMEM */
 231static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 232{
 233        return 0;
 234}
 235
 236static void unregister_memcg_shrinker(struct shrinker *shrinker)
 237{
 238}
 239#endif /* CONFIG_MEMCG_KMEM */
 240
 241#ifdef CONFIG_MEMCG
 242static bool global_reclaim(struct scan_control *sc)
 243{
 244        return !sc->target_mem_cgroup;
 245}
 246
 247/**
 248 * sane_reclaim - is the usual dirty throttling mechanism operational?
 249 * @sc: scan_control in question
 250 *
 251 * The normal page dirty throttling mechanism in balance_dirty_pages() is
 252 * completely broken with the legacy memcg and direct stalling in
 253 * shrink_page_list() is used for throttling instead, which lacks all the
 254 * niceties such as fairness, adaptive pausing, bandwidth proportional
 255 * allocation and configurability.
 256 *
 257 * This function tests whether the vmscan currently in progress can assume
 258 * that the normal dirty throttling mechanism is operational.
 259 */
 260static bool sane_reclaim(struct scan_control *sc)
 261{
 262        struct mem_cgroup *memcg = sc->target_mem_cgroup;
 263
 264        if (!memcg)
 265                return true;
 266#ifdef CONFIG_CGROUP_WRITEBACK
 267        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 268                return true;
 269#endif
 270        return false;
 271}
 272
 273static void set_memcg_congestion(pg_data_t *pgdat,
 274                                struct mem_cgroup *memcg,
 275                                bool congested)
 276{
 277        struct mem_cgroup_per_node *mn;
 278
 279        if (!memcg)
 280                return;
 281
 282        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 283        WRITE_ONCE(mn->congested, congested);
 284}
 285
 286static bool memcg_congested(pg_data_t *pgdat,
 287                        struct mem_cgroup *memcg)
 288{
 289        struct mem_cgroup_per_node *mn;
 290
 291        mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 292        return READ_ONCE(mn->congested);
 293
 294}
 295#else
 296static bool global_reclaim(struct scan_control *sc)
 297{
 298        return true;
 299}
 300
 301static bool sane_reclaim(struct scan_control *sc)
 302{
 303        return true;
 304}
 305
 306static inline void set_memcg_congestion(struct pglist_data *pgdat,
 307                                struct mem_cgroup *memcg, bool congested)
 308{
 309}
 310
 311static inline bool memcg_congested(struct pglist_data *pgdat,
 312                        struct mem_cgroup *memcg)
 313{
 314        return false;
 315
 316}
 317#endif
 318
 319/*
 320 * This misses isolated pages which are not accounted for to save counters.
 321 * As the data only determines if reclaim or compaction continues, it is
 322 * not expected that isolated pages will be a dominating factor.
 323 */
 324unsigned long zone_reclaimable_pages(struct zone *zone)
 325{
 326        unsigned long nr;
 327
 328        nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
 329                zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
 330        if (get_nr_swap_pages() > 0)
 331                nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
 332                        zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
 333
 334        return nr;
 335}
 336
 337/**
 338 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 339 * @lruvec: lru vector
 340 * @lru: lru to use
 341 * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
 342 */
 343unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 344{
 345        unsigned long lru_size;
 346        int zid;
 347
 348        if (!mem_cgroup_disabled())
 349                lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
 350        else
 351                lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 352
 353        for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
 354                struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
 355                unsigned long size;
 356
 357                if (!managed_zone(zone))
 358                        continue;
 359
 360                if (!mem_cgroup_disabled())
 361                        size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
 362                else
 363                        size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
 364                                       NR_ZONE_LRU_BASE + lru);
 365                lru_size -= min(size, lru_size);
 366        }
 367
 368        return lru_size;
 369
 370}
 371
 372/*
 373 * Add a shrinker callback to be called from the vm.
 374 */
 375int prealloc_shrinker(struct shrinker *shrinker)
 376{
 377        unsigned int size = sizeof(*shrinker->nr_deferred);
 378
 379        if (shrinker->flags & SHRINKER_NUMA_AWARE)
 380                size *= nr_node_ids;
 381
 382        shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
 383        if (!shrinker->nr_deferred)
 384                return -ENOMEM;
 385
 386        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
 387                if (prealloc_memcg_shrinker(shrinker))
 388                        goto free_deferred;
 389        }
 390
 391        return 0;
 392
 393free_deferred:
 394        kfree(shrinker->nr_deferred);
 395        shrinker->nr_deferred = NULL;
 396        return -ENOMEM;
 397}
 398
 399void free_prealloced_shrinker(struct shrinker *shrinker)
 400{
 401        if (!shrinker->nr_deferred)
 402                return;
 403
 404        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
 405                unregister_memcg_shrinker(shrinker);
 406
 407        kfree(shrinker->nr_deferred);
 408        shrinker->nr_deferred = NULL;
 409}
 410
 411void register_shrinker_prepared(struct shrinker *shrinker)
 412{
 413        down_write(&shrinker_rwsem);
 414        list_add_tail(&shrinker->list, &shrinker_list);
 415#ifdef CONFIG_MEMCG_KMEM
 416        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
 417                idr_replace(&shrinker_idr, shrinker, shrinker->id);
 418#endif
 419        up_write(&shrinker_rwsem);
 420}
 421
 422int register_shrinker(struct shrinker *shrinker)
 423{
 424        int err = prealloc_shrinker(shrinker);
 425
 426        if (err)
 427                return err;
 428        register_shrinker_prepared(shrinker);
 429        return 0;
 430}
 431EXPORT_SYMBOL(register_shrinker);
 432
 433/*
 434 * Remove one
 435 */
 436void unregister_shrinker(struct shrinker *shrinker)
 437{
 438        if (!shrinker->nr_deferred)
 439                return;
 440        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
 441                unregister_memcg_shrinker(shrinker);
 442        down_write(&shrinker_rwsem);
 443        list_del(&shrinker->list);
 444        up_write(&shrinker_rwsem);
 445        kfree(shrinker->nr_deferred);
 446        shrinker->nr_deferred = NULL;
 447}
 448EXPORT_SYMBOL(unregister_shrinker);
 449
 450#define SHRINK_BATCH 128
 451
 452static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 453                                    struct shrinker *shrinker, int priority)
 454{
 455        unsigned long freed = 0;
 456        unsigned long long delta;
 457        long total_scan;
 458        long freeable;
 459        long nr;
 460        long new_nr;
 461        int nid = shrinkctl->nid;
 462        long batch_size = shrinker->batch ? shrinker->batch
 463                                          : SHRINK_BATCH;
 464        long scanned = 0, next_deferred;
 465
 466        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 467                nid = 0;
 468
 469        freeable = shrinker->count_objects(shrinker, shrinkctl);
 470        if (freeable == 0 || freeable == SHRINK_EMPTY)
 471                return freeable;
 472
 473        /*
 474         * copy the current shrinker scan count into a local variable
 475         * and zero it so that other concurrent shrinker invocations
 476         * don't also do this scanning work.
 477         */
 478        nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
 479
 480        total_scan = nr;
 481        if (shrinker->seeks) {
 482                delta = freeable >> priority;
 483                delta *= 4;
 484                do_div(delta, shrinker->seeks);
 485        } else {
 486                /*
 487                 * These objects don't require any IO to create. Trim
 488                 * them aggressively under memory pressure to keep
 489                 * them from causing refetches in the IO caches.
 490                 */
 491                delta = freeable / 2;
 492        }
 493
 494        total_scan += delta;
 495        if (total_scan < 0) {
 496                pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
 497                       shrinker->scan_objects, total_scan);
 498                total_scan = freeable;
 499                next_deferred = nr;
 500        } else
 501                next_deferred = total_scan;
 502
 503        /*
 504         * We need to avoid excessive windup on filesystem shrinkers
 505         * due to large numbers of GFP_NOFS allocations causing the
 506         * shrinkers to return -1 all the time. This results in a large
 507         * nr being built up so when a shrink that can do some work
 508         * comes along it empties the entire cache due to nr >>>
 509         * freeable. This is bad for sustaining a working set in
 510         * memory.
 511         *
 512         * Hence only allow the shrinker to scan the entire cache when
 513         * a large delta change is calculated directly.
 514         */
 515        if (delta < freeable / 4)
 516                total_scan = min(total_scan, freeable / 2);
 517
 518        /*
 519         * Avoid risking looping forever due to too large nr value:
 520         * never try to free more than twice the estimate number of
 521         * freeable entries.
 522         */
 523        if (total_scan > freeable * 2)
 524                total_scan = freeable * 2;
 525
 526        trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
 527                                   freeable, delta, total_scan, priority);
 528
 529        /*
 530         * Normally, we should not scan less than batch_size objects in one
 531         * pass to avoid too frequent shrinker calls, but if the slab has less
 532         * than batch_size objects in total and we are really tight on memory,
 533         * we will try to reclaim all available objects, otherwise we can end
 534         * up failing allocations although there are plenty of reclaimable
 535         * objects spread over several slabs with usage less than the
 536         * batch_size.
 537         *
 538         * We detect the "tight on memory" situations by looking at the total
 539         * number of objects we want to scan (total_scan). If it is greater
 540         * than the total number of objects on slab (freeable), we must be
 541         * scanning at high prio and therefore should try to reclaim as much as
 542         * possible.
 543         */
 544        while (total_scan >= batch_size ||
 545               total_scan >= freeable) {
 546                unsigned long ret;
 547                unsigned long nr_to_scan = min(batch_size, total_scan);
 548
 549                shrinkctl->nr_to_scan = nr_to_scan;
 550                shrinkctl->nr_scanned = nr_to_scan;
 551                ret = shrinker->scan_objects(shrinker, shrinkctl);
 552                if (ret == SHRINK_STOP)
 553                        break;
 554                freed += ret;
 555
 556                count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned);
 557                total_scan -= shrinkctl->nr_scanned;
 558                scanned += shrinkctl->nr_scanned;
 559
 560                cond_resched();
 561        }
 562
 563        if (next_deferred >= scanned)
 564                next_deferred -= scanned;
 565        else
 566                next_deferred = 0;
 567        /*
 568         * move the unused scan count back into the shrinker in a
 569         * manner that handles concurrent updates. If we exhausted the
 570         * scan, there is no need to do an update.
 571         */
 572        if (next_deferred > 0)
 573                new_nr = atomic_long_add_return(next_deferred,
 574                                                &shrinker->nr_deferred[nid]);
 575        else
 576                new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
 577
 578        trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
 579        return freed;
 580}
 581
 582#ifdef CONFIG_MEMCG_KMEM
 583static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 584                        struct mem_cgroup *memcg, int priority)
 585{
 586        struct memcg_shrinker_map *map;
 587        unsigned long ret, freed = 0;
 588        int i;
 589
 590        if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
 591                return 0;
 592
 593        if (!down_read_trylock(&shrinker_rwsem))
 594                return 0;
 595
 596        map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
 597                                        true);
 598        if (unlikely(!map))
 599                goto unlock;
 600
 601        for_each_set_bit(i, map->map, shrinker_nr_max) {
 602                struct shrink_control sc = {
 603                        .gfp_mask = gfp_mask,
 604                        .nid = nid,
 605                        .memcg = memcg,
 606                };
 607                struct shrinker *shrinker;
 608
 609                shrinker = idr_find(&shrinker_idr, i);
 610                if (unlikely(!shrinker || shrinker == SHRINKER_REGISTERING)) {
 611                        if (!shrinker)
 612                                clear_bit(i, map->map);
 613                        continue;
 614                }
 615
 616                ret = do_shrink_slab(&sc, shrinker, priority);
 617                if (ret == SHRINK_EMPTY) {
 618                        clear_bit(i, map->map);
 619                        /*
 620                         * After the shrinker reported that it had no objects to
 621                         * free, but before we cleared the corresponding bit in
 622                         * the memcg shrinker map, a new object might have been
 623                         * added. To make sure, we have the bit set in this
 624                         * case, we invoke the shrinker one more time and reset
 625                         * the bit if it reports that it is not empty anymore.
 626                         * The memory barrier here pairs with the barrier in
 627                         * memcg_set_shrinker_bit():
 628                         *
 629                         * list_lru_add()     shrink_slab_memcg()
 630                         *   list_add_tail()    clear_bit()
 631                         *   <MB>               <MB>
 632                         *   set_bit()          do_shrink_slab()
 633                         */
 634                        smp_mb__after_atomic();
 635                        ret = do_shrink_slab(&sc, shrinker, priority);
 636                        if (ret == SHRINK_EMPTY)
 637                                ret = 0;
 638                        else
 639                                memcg_set_shrinker_bit(memcg, nid, i);
 640                }
 641                freed += ret;
 642
 643                if (rwsem_is_contended(&shrinker_rwsem)) {
 644                        freed = freed ? : 1;
 645                        break;
 646                }
 647        }
 648unlock:
 649        up_read(&shrinker_rwsem);
 650        return freed;
 651}
 652#else /* CONFIG_MEMCG_KMEM */
 653static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 654                        struct mem_cgroup *memcg, int priority)
 655{
 656        return 0;
 657}
 658#endif /* CONFIG_MEMCG_KMEM */
 659
 660/**
 661 * shrink_slab - shrink slab caches
 662 * @gfp_mask: allocation context
 663 * @nid: node whose slab caches to target
 664 * @memcg: memory cgroup whose slab caches to target
 665 * @priority: the reclaim priority
 666 *
 667 * Call the shrink functions to age shrinkable caches.
 668 *
 669 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set,
 670 * unaware shrinkers will receive a node id of 0 instead.
 671 *
 672 * @memcg specifies the memory cgroup to target. Unaware shrinkers
 673 * are called only if it is the root cgroup.
 674 *
 675 * @priority is sc->priority, we take the number of objects and >> by priority
 676 * in order to get the scan target.
 677 *
 678 * Returns the number of reclaimed slab objects.
 679 */
 680static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 681                                 struct mem_cgroup *memcg,
 682                                 int priority)
 683{
 684        unsigned long ret, freed = 0;
 685        struct shrinker *shrinker;
 686
 687        if (!mem_cgroup_is_root(memcg))
 688                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 689
 690        if (!down_read_trylock(&shrinker_rwsem))
 691                goto out;
 692
 693        list_for_each_entry(shrinker, &shrinker_list, list) {
 694                struct shrink_control sc = {
 695                        .gfp_mask = gfp_mask,
 696                        .nid = nid,
 697                        .memcg = memcg,
 698                };
 699
 700                ret = do_shrink_slab(&sc, shrinker, priority);
 701                if (ret == SHRINK_EMPTY)
 702                        ret = 0;
 703                freed += ret;
 704                /*
 705                 * Bail out if someone want to register a new shrinker to
 706                 * prevent the regsitration from being stalled for long periods
 707                 * by parallel ongoing shrinking.
 708                 */
 709                if (rwsem_is_contended(&shrinker_rwsem)) {
 710                        freed = freed ? : 1;
 711                        break;
 712                }
 713        }
 714
 715        up_read(&shrinker_rwsem);
 716out:
 717        cond_resched();
 718        return freed;
 719}
 720
 721void drop_slab_node(int nid)
 722{
 723        unsigned long freed;
 724
 725        do {
 726                struct mem_cgroup *memcg = NULL;
 727
 728                freed = 0;
 729                memcg = mem_cgroup_iter(NULL, NULL, NULL);
 730                do {
 731                        freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
 732                } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 733        } while (freed > 10);
 734}
 735
 736void drop_slab(void)
 737{
 738        int nid;
 739
 740        for_each_online_node(nid)
 741                drop_slab_node(nid);
 742}
 743
 744static inline int is_page_cache_freeable(struct page *page)
 745{
 746        /*
 747         * A freeable page cache page is referenced only by the caller
 748         * that isolated the page, the page cache and optional buffer
 749         * heads at page->private.
 750         */
 751        int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
 752                HPAGE_PMD_NR : 1;
 753        return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
 754}
 755
 756static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
 757{
 758        if (current->flags & PF_SWAPWRITE)
 759                return 1;
 760        if (!inode_write_congested(inode))
 761                return 1;
 762        if (inode_to_bdi(inode) == current->backing_dev_info)
 763                return 1;
 764        return 0;
 765}
 766
 767/*
 768 * We detected a synchronous write error writing a page out.  Probably
 769 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 770 * fsync(), msync() or close().
 771 *
 772 * The tricky part is that after writepage we cannot touch the mapping: nothing
 773 * prevents it from being freed up.  But we have a ref on the page and once
 774 * that page is locked, the mapping is pinned.
 775 *
 776 * We're allowed to run sleeping lock_page() here because we know the caller has
 777 * __GFP_FS.
 778 */
 779static void handle_write_error(struct address_space *mapping,
 780                                struct page *page, int error)
 781{
 782        lock_page(page);
 783        if (page_mapping(page) == mapping)
 784                mapping_set_error(mapping, error);
 785        unlock_page(page);
 786}
 787
 788/* possible outcome of pageout() */
 789typedef enum {
 790        /* failed to write page out, page is locked */
 791        PAGE_KEEP,
 792        /* move page to the active list, page is locked */
 793        PAGE_ACTIVATE,
 794        /* page has been sent to the disk successfully, page is unlocked */
 795        PAGE_SUCCESS,
 796        /* page is clean and locked */
 797        PAGE_CLEAN,
 798} pageout_t;
 799
 800/*
 801 * pageout is called by shrink_page_list() for each dirty page.
 802 * Calls ->writepage().
 803 */
 804static pageout_t pageout(struct page *page, struct address_space *mapping,
 805                         struct scan_control *sc)
 806{
 807        /*
 808         * If the page is dirty, only perform writeback if that write
 809         * will be non-blocking.  To prevent this allocation from being
 810         * stalled by pagecache activity.  But note that there may be
 811         * stalls if we need to run get_block().  We could test
 812         * PagePrivate for that.
 813         *
 814         * If this process is currently in __generic_file_write_iter() against
 815         * this page's queue, we can perform writeback even if that
 816         * will block.
 817         *
 818         * If the page is swapcache, write it back even if that would
 819         * block, for some throttling. This happens by accident, because
 820         * swap_backing_dev_info is bust: it doesn't reflect the
 821         * congestion state of the swapdevs.  Easy to fix, if needed.
 822         */
 823        if (!is_page_cache_freeable(page))
 824                return PAGE_KEEP;
 825        if (!mapping) {
 826                /*
 827                 * Some data journaling orphaned pages can have
 828                 * page->mapping == NULL while being dirty with clean buffers.
 829                 */
 830                if (page_has_private(page)) {
 831                        if (try_to_free_buffers(page)) {
 832                                ClearPageDirty(page);
 833                                pr_info("%s: orphaned page\n", __func__);
 834                                return PAGE_CLEAN;
 835                        }
 836                }
 837                return PAGE_KEEP;
 838        }
 839        if (mapping->a_ops->writepage == NULL)
 840                return PAGE_ACTIVATE;
 841        if (!may_write_to_inode(mapping->host, sc))
 842                return PAGE_KEEP;
 843
 844        if (clear_page_dirty_for_io(page)) {
 845                int res;
 846                struct writeback_control wbc = {
 847                        .sync_mode = WB_SYNC_NONE,
 848                        .nr_to_write = SWAP_CLUSTER_MAX,
 849                        .range_start = 0,
 850                        .range_end = LLONG_MAX,
 851                        .for_reclaim = 1,
 852                };
 853
 854                SetPageReclaim(page);
 855                res = mapping->a_ops->writepage(page, &wbc);
 856                if (res < 0)
 857                        handle_write_error(mapping, page, res);
 858                if (res == AOP_WRITEPAGE_ACTIVATE) {
 859                        ClearPageReclaim(page);
 860                        return PAGE_ACTIVATE;
 861                }
 862
 863                if (!PageWriteback(page)) {
 864                        /* synchronous write or broken a_ops? */
 865                        ClearPageReclaim(page);
 866                }
 867                trace_mm_vmscan_writepage(page);
 868                inc_node_page_state(page, NR_VMSCAN_WRITE);
 869                return PAGE_SUCCESS;
 870        }
 871
 872        return PAGE_CLEAN;
 873}
 874
 875/*
 876 * Same as remove_mapping, but if the page is removed from the mapping, it
 877 * gets returned with a refcount of 0.
 878 */
 879static int __remove_mapping(struct address_space *mapping, struct page *page,
 880                            bool reclaimed)
 881{
 882        unsigned long flags;
 883        int refcount;
 884
 885        BUG_ON(!PageLocked(page));
 886        BUG_ON(mapping != page_mapping(page));
 887
 888        xa_lock_irqsave(&mapping->i_pages, flags);
 889        /*
 890         * The non racy check for a busy page.
 891         *
 892         * Must be careful with the order of the tests. When someone has
 893         * a ref to the page, it may be possible that they dirty it then
 894         * drop the reference. So if PageDirty is tested before page_count
 895         * here, then the following race may occur:
 896         *
 897         * get_user_pages(&page);
 898         * [user mapping goes away]
 899         * write_to(page);
 900         *                              !PageDirty(page)    [good]
 901         * SetPageDirty(page);
 902         * put_page(page);
 903         *                              !page_count(page)   [good, discard it]
 904         *
 905         * [oops, our write_to data is lost]
 906         *
 907         * Reversing the order of the tests ensures such a situation cannot
 908         * escape unnoticed. The smp_rmb is needed to ensure the page->flags
 909         * load is not satisfied before that of page->_refcount.
 910         *
 911         * Note that if SetPageDirty is always performed via set_page_dirty,
 912         * and thus under the i_pages lock, then this ordering is not required.
 913         */
 914        if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
 915                refcount = 1 + HPAGE_PMD_NR;
 916        else
 917                refcount = 2;
 918        if (!page_ref_freeze(page, refcount))
 919                goto cannot_free;
 920        /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
 921        if (unlikely(PageDirty(page))) {
 922                page_ref_unfreeze(page, refcount);
 923                goto cannot_free;
 924        }
 925
 926        if (PageSwapCache(page)) {
 927                swp_entry_t swap = { .val = page_private(page) };
 928                mem_cgroup_swapout(page, swap);
 929                __delete_from_swap_cache(page, swap);
 930                xa_unlock_irqrestore(&mapping->i_pages, flags);
 931                put_swap_page(page, swap);
 932        } else {
 933                void (*freepage)(struct page *);
 934                void *shadow = NULL;
 935
 936                freepage = mapping->a_ops->freepage;
 937                /*
 938                 * Remember a shadow entry for reclaimed file cache in
 939                 * order to detect refaults, thus thrashing, later on.
 940                 *
 941                 * But don't store shadows in an address space that is
 942                 * already exiting.  This is not just an optizimation,
 943                 * inode reclaim needs to empty out the radix tree or
 944                 * the nodes are lost.  Don't plant shadows behind its
 945                 * back.
 946                 *
 947                 * We also don't store shadows for DAX mappings because the
 948                 * only page cache pages found in these are zero pages
 949                 * covering holes, and because we don't want to mix DAX
 950                 * exceptional entries and shadow exceptional entries in the
 951                 * same address_space.
 952                 */
 953                if (reclaimed && page_is_file_cache(page) &&
 954                    !mapping_exiting(mapping) && !dax_mapping(mapping))
 955                        shadow = workingset_eviction(page);
 956                __delete_from_page_cache(page, shadow);
 957                xa_unlock_irqrestore(&mapping->i_pages, flags);
 958
 959                if (freepage != NULL)
 960                        freepage(page);
 961        }
 962
 963        return 1;
 964
 965cannot_free:
 966        xa_unlock_irqrestore(&mapping->i_pages, flags);
 967        return 0;
 968}
 969
 970/*
 971 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 972 * someone else has a ref on the page, abort and return 0.  If it was
 973 * successfully detached, return 1.  Assumes the caller has a single ref on
 974 * this page.
 975 */
 976int remove_mapping(struct address_space *mapping, struct page *page)
 977{
 978        if (__remove_mapping(mapping, page, false)) {
 979                /*
 980                 * Unfreezing the refcount with 1 rather than 2 effectively
 981                 * drops the pagecache ref for us without requiring another
 982                 * atomic operation.
 983                 */
 984                page_ref_unfreeze(page, 1);
 985                return 1;
 986        }
 987        return 0;
 988}
 989
 990/**
 991 * putback_lru_page - put previously isolated page onto appropriate LRU list
 992 * @page: page to be put back to appropriate lru list
 993 *
 994 * Add previously isolated @page to appropriate LRU list.
 995 * Page may still be unevictable for other reasons.
 996 *
 997 * lru_lock must not be held, interrupts must be enabled.
 998 */
 999void putback_lru_page(struct page *page)
1000{
1001        lru_cache_add(page);
1002        put_page(page);         /* drop ref from isolate */
1003}
1004
1005enum page_references {
1006        PAGEREF_RECLAIM,
1007        PAGEREF_RECLAIM_CLEAN,
1008        PAGEREF_KEEP,
1009        PAGEREF_ACTIVATE,
1010};
1011
1012static enum page_references page_check_references(struct page *page,
1013                                                  struct scan_control *sc)
1014{
1015        int referenced_ptes, referenced_page;
1016        unsigned long vm_flags;
1017
1018        referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
1019                                          &vm_flags);
1020        referenced_page = TestClearPageReferenced(page);
1021
1022        /*
1023         * Mlock lost the isolation race with us.  Let try_to_unmap()
1024         * move the page to the unevictable list.
1025         */
1026        if (vm_flags & VM_LOCKED)
1027                return PAGEREF_RECLAIM;
1028
1029        if (referenced_ptes) {
1030                if (PageSwapBacked(page))
1031                        return PAGEREF_ACTIVATE;
1032                /*
1033                 * All mapped pages start out with page table
1034                 * references from the instantiating fault, so we need
1035                 * to look twice if a mapped file page is used more
1036                 * than once.
1037                 *
1038                 * Mark it and spare it for another trip around the
1039                 * inactive list.  Another page table reference will
1040                 * lead to its activation.
1041                 *
1042                 * Note: the mark is set for activated pages as well
1043                 * so that recently deactivated but used pages are
1044                 * quickly recovered.
1045                 */
1046                SetPageReferenced(page);
1047
1048                if (referenced_page || referenced_ptes > 1)
1049                        return PAGEREF_ACTIVATE;
1050
1051                /*
1052                 * Activate file-backed executable pages after first usage.
1053                 */
1054                if (vm_flags & VM_EXEC)
1055                        return PAGEREF_ACTIVATE;
1056
1057                return PAGEREF_KEEP;
1058        }
1059
1060        /* Reclaim if clean, defer dirty pages to writeback */
1061        if (referenced_page && !PageSwapBacked(page))
1062                return PAGEREF_RECLAIM_CLEAN;
1063
1064        return PAGEREF_RECLAIM;
1065}
1066
1067/* Check if a page is dirty or under writeback */
1068static void page_check_dirty_writeback(struct page *page,
1069                                       bool *dirty, bool *writeback)
1070{
1071        struct address_space *mapping;
1072
1073        /*
1074         * Anonymous pages are not handled by flushers and must be written
1075         * from reclaim context. Do not stall reclaim based on them
1076         */
1077        if (!page_is_file_cache(page) ||
1078            (PageAnon(page) && !PageSwapBacked(page))) {
1079                *dirty = false;
1080                *writeback = false;
1081                return;
1082        }
1083
1084        /* By default assume that the page flags are accurate */
1085        *dirty = PageDirty(page);
1086        *writeback = PageWriteback(page);
1087
1088        /* Verify dirty/writeback state if the filesystem supports it */
1089        if (!page_has_private(page))
1090                return;
1091
1092        mapping = page_mapping(page);
1093        if (mapping && mapping->a_ops->is_dirty_writeback)
1094                mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
1095}
1096
1097/*
1098 * shrink_page_list() returns the number of reclaimed pages
1099 */
1100static unsigned long shrink_page_list(struct list_head *page_list,
1101                                      struct pglist_data *pgdat,
1102                                      struct scan_control *sc,
1103                                      enum ttu_flags ttu_flags,
1104                                      struct reclaim_stat *stat,
1105                                      bool force_reclaim)
1106{
1107        LIST_HEAD(ret_pages);
1108        LIST_HEAD(free_pages);
1109        unsigned nr_reclaimed = 0;
1110        unsigned pgactivate = 0;
1111
1112        memset(stat, 0, sizeof(*stat));
1113        cond_resched();
1114
1115        while (!list_empty(page_list)) {
1116                struct address_space *mapping;
1117                struct page *page;
1118                int may_enter_fs;
1119                enum page_references references = PAGEREF_RECLAIM_CLEAN;
1120                bool dirty, writeback;
1121
1122                cond_resched();
1123
1124                page = lru_to_page(page_list);
1125                list_del(&page->lru);
1126
1127                if (!trylock_page(page))
1128                        goto keep;
1129
1130                VM_BUG_ON_PAGE(PageActive(page), page);
1131
1132                sc->nr_scanned++;
1133
1134                if (unlikely(!page_evictable(page)))
1135                        goto activate_locked;
1136
1137                if (!sc->may_unmap && page_mapped(page))
1138                        goto keep_locked;
1139
1140                /* Double the slab pressure for mapped and swapcache pages */
1141                if ((page_mapped(page) || PageSwapCache(page)) &&
1142                    !(PageAnon(page) && !PageSwapBacked(page)))
1143                        sc->nr_scanned++;
1144
1145                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
1146                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
1147
1148                /*
1149                 * The number of dirty pages determines if a node is marked
1150                 * reclaim_congested which affects wait_iff_congested. kswapd
1151                 * will stall and start writing pages if the tail of the LRU
1152                 * is all dirty unqueued pages.
1153                 */
1154                page_check_dirty_writeback(page, &dirty, &writeback);
1155                if (dirty || writeback)
1156                        stat->nr_dirty++;
1157
1158                if (dirty && !writeback)
1159                        stat->nr_unqueued_dirty++;
1160
1161                /*
1162                 * Treat this page as congested if the underlying BDI is or if
1163                 * pages are cycling through the LRU so quickly that the
1164                 * pages marked for immediate reclaim are making it to the
1165                 * end of the LRU a second time.
1166                 */
1167                mapping = page_mapping(page);
1168                if (((dirty || writeback) && mapping &&
1169                     inode_write_congested(mapping->host)) ||
1170                    (writeback && PageReclaim(page)))
1171                        stat->nr_congested++;
1172
1173                /*
1174                 * If a page at the tail of the LRU is under writeback, there
1175                 * are three cases to consider.
1176                 *
1177                 * 1) If reclaim is encountering an excessive number of pages
1178                 *    under writeback and this page is both under writeback and
1179                 *    PageReclaim then it indicates that pages are being queued
1180                 *    for IO but are being recycled through the LRU before the
1181                 *    IO can complete. Waiting on the page itself risks an
1182                 *    indefinite stall if it is impossible to writeback the
1183                 *    page due to IO error or disconnected storage so instead
1184                 *    note that the LRU is being scanned too quickly and the
1185                 *    caller can stall after page list has been processed.
1186                 *
1187                 * 2) Global or new memcg reclaim encounters a page that is
1188                 *    not marked for immediate reclaim, or the caller does not
1189                 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
1190                 *    not to fs). In this case mark the page for immediate
1191                 *    reclaim and continue scanning.
1192                 *
1193                 *    Require may_enter_fs because we would wait on fs, which
1194                 *    may not have submitted IO yet. And the loop driver might
1195                 *    enter reclaim, and deadlock if it waits on a page for
1196                 *    which it is needed to do the write (loop masks off
1197                 *    __GFP_IO|__GFP_FS for this reason); but more thought
1198                 *    would probably show more reasons.
1199                 *
1200                 * 3) Legacy memcg encounters a page that is already marked
1201                 *    PageReclaim. memcg does not have any dirty pages
1202                 *    throttling so we could easily OOM just because too many
1203                 *    pages are in writeback and there is nothing else to
1204                 *    reclaim. Wait for the writeback to complete.
1205                 *
1206                 * In cases 1) and 2) we activate the pages to get them out of
1207                 * the way while we continue scanning for clean pages on the
1208                 * inactive list and refilling from the active list. The
1209                 * observation here is that waiting for disk writes is more
1210                 * expensive than potentially causing reloads down the line.
1211                 * Since they're marked for immediate reclaim, they won't put
1212                 * memory pressure on the cache working set any longer than it
1213                 * takes to write them to disk.
1214                 */
1215                if (PageWriteback(page)) {
1216                        /* Case 1 above */
1217                        if (current_is_kswapd() &&
1218                            PageReclaim(page) &&
1219                            test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1220                                stat->nr_immediate++;
1221                                goto activate_locked;
1222
1223                        /* Case 2 above */
1224                        } else if (sane_reclaim(sc) ||
1225                            !PageReclaim(page) || !may_enter_fs) {
1226                                /*
1227                                 * This is slightly racy - end_page_writeback()
1228                                 * might have just cleared PageReclaim, then
1229                                 * setting PageReclaim here end up interpreted
1230                                 * as PageReadahead - but that does not matter
1231                                 * enough to care.  What we do want is for this
1232                                 * page to have PageReclaim set next time memcg
1233                                 * reclaim reaches the tests above, so it will
1234                                 * then wait_on_page_writeback() to avoid OOM;
1235                                 * and it's also appropriate in global reclaim.
1236                                 */
1237                                SetPageReclaim(page);
1238                                stat->nr_writeback++;
1239                                goto activate_locked;
1240
1241                        /* Case 3 above */
1242                        } else {
1243                                unlock_page(page);
1244                                wait_on_page_writeback(page);
1245                                /* then go back and try same page again */
1246                                list_add_tail(&page->lru, page_list);
1247                                continue;
1248                        }
1249                }
1250
1251                if (!force_reclaim)
1252                        references = page_check_references(page, sc);
1253
1254                switch (references) {
1255                case PAGEREF_ACTIVATE:
1256                        goto activate_locked;
1257                case PAGEREF_KEEP:
1258                        stat->nr_ref_keep++;
1259                        goto keep_locked;
1260                case PAGEREF_RECLAIM:
1261                case PAGEREF_RECLAIM_CLEAN:
1262                        ; /* try to reclaim the page below */
1263                }
1264
1265                /*
1266                 * Anonymous process memory has backing store?
1267                 * Try to allocate it some swap space here.
1268                 * Lazyfree page could be freed directly
1269                 */
1270                if (PageAnon(page) && PageSwapBacked(page)) {
1271                        if (!PageSwapCache(page)) {
1272                                if (!(sc->gfp_mask & __GFP_IO))
1273                                        goto keep_locked;
1274                                if (PageTransHuge(page)) {
1275                                        /* cannot split THP, skip it */
1276                                        if (!can_split_huge_page(page, NULL))
1277                                                goto activate_locked;
1278                                        /*
1279                                         * Split pages without a PMD map right
1280                                         * away. Chances are some or all of the
1281                                         * tail pages can be freed without IO.
1282                                         */
1283                                        if (!compound_mapcount(page) &&
1284                                            split_huge_page_to_list(page,
1285                                                                    page_list))
1286                                                goto activate_locked;
1287                                }
1288                                if (!add_to_swap(page)) {
1289                                        if (!PageTransHuge(page))
1290                                                goto activate_locked;
1291                                        /* Fallback to swap normal pages */
1292                                        if (split_huge_page_to_list(page,
1293                                                                    page_list))
1294                                                goto activate_locked;
1295#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1296                                        count_vm_event(THP_SWPOUT_FALLBACK);
1297#endif
1298                                        if (!add_to_swap(page))
1299                                                goto activate_locked;
1300                                }
1301
1302                                may_enter_fs = 1;
1303
1304                                /* Adding to swap updated mapping */
1305                                mapping = page_mapping(page);
1306                        }
1307                } else if (unlikely(PageTransHuge(page))) {
1308                        /* Split file THP */
1309                        if (split_huge_page_to_list(page, page_list))
1310                                goto keep_locked;
1311                }
1312
1313                /*
1314                 * The page is mapped into the page tables of one or more
1315                 * processes. Try to unmap it here.
1316                 */
1317                if (page_mapped(page)) {
1318                        enum ttu_flags flags = ttu_flags | TTU_BATCH_FLUSH;
1319
1320                        if (unlikely(PageTransHuge(page)))
1321                                flags |= TTU_SPLIT_HUGE_PMD;
1322                        if (!try_to_unmap(page, flags)) {
1323                                stat->nr_unmap_fail++;
1324                                goto activate_locked;
1325                        }
1326                }
1327
1328                if (PageDirty(page)) {
1329                        /*
1330                         * Only kswapd can writeback filesystem pages
1331                         * to avoid risk of stack overflow. But avoid
1332                         * injecting inefficient single-page IO into
1333                         * flusher writeback as much as possible: only
1334                         * write pages when we've encountered many
1335                         * dirty pages, and when we've already scanned
1336                         * the rest of the LRU for clean pages and see
1337                         * the same dirty pages again (PageReclaim).
1338                         */
1339                        if (page_is_file_cache(page) &&
1340                            (!current_is_kswapd() || !PageReclaim(page) ||
1341                             !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1342                                /*
1343                                 * Immediately reclaim when written back.
1344                                 * Similar in principal to deactivate_page()
1345                                 * except we already have the page isolated
1346                                 * and know it's dirty
1347                                 */
1348                                inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
1349                                SetPageReclaim(page);
1350
1351                                goto activate_locked;
1352                        }
1353
1354                        if (references == PAGEREF_RECLAIM_CLEAN)
1355                                goto keep_locked;
1356                        if (!may_enter_fs)
1357                                goto keep_locked;
1358                        if (!sc->may_writepage)
1359                                goto keep_locked;
1360
1361                        /*
1362                         * Page is dirty. Flush the TLB if a writable entry
1363                         * potentially exists to avoid CPU writes after IO
1364                         * starts and then write it out here.
1365                         */
1366                        try_to_unmap_flush_dirty();
1367                        switch (pageout(page, mapping, sc)) {
1368                        case PAGE_KEEP:
1369                                goto keep_locked;
1370                        case PAGE_ACTIVATE:
1371                                goto activate_locked;
1372                        case PAGE_SUCCESS:
1373                                if (PageWriteback(page))
1374                                        goto keep;
1375                                if (PageDirty(page))
1376                                        goto keep;
1377
1378                                /*
1379                                 * A synchronous write - probably a ramdisk.  Go
1380                                 * ahead and try to reclaim the page.
1381                                 */
1382                                if (!trylock_page(page))
1383                                        goto keep;
1384                                if (PageDirty(page) || PageWriteback(page))
1385                                        goto keep_locked;
1386                                mapping = page_mapping(page);
1387                        case PAGE_CLEAN:
1388                                ; /* try to free the page below */
1389                        }
1390                }
1391
1392                /*
1393                 * If the page has buffers, try to free the buffer mappings
1394                 * associated with this page. If we succeed we try to free
1395                 * the page as well.
1396                 *
1397                 * We do this even if the page is PageDirty().
1398                 * try_to_release_page() does not perform I/O, but it is
1399                 * possible for a page to have PageDirty set, but it is actually
1400                 * clean (all its buffers are clean).  This happens if the
1401                 * buffers were written out directly, with submit_bh(). ext3
1402                 * will do this, as well as the blockdev mapping.
1403                 * try_to_release_page() will discover that cleanness and will
1404                 * drop the buffers and mark the page clean - it can be freed.
1405                 *
1406                 * Rarely, pages can have buffers and no ->mapping.  These are
1407                 * the pages which were not successfully invalidated in
1408                 * truncate_complete_page().  We try to drop those buffers here
1409                 * and if that worked, and the page is no longer mapped into
1410                 * process address space (page_count == 1) it can be freed.
1411                 * Otherwise, leave the page on the LRU so it is swappable.
1412                 */
1413                if (page_has_private(page)) {
1414                        if (!try_to_release_page(page, sc->gfp_mask))
1415                                goto activate_locked;
1416                        if (!mapping && page_count(page) == 1) {
1417                                unlock_page(page);
1418                                if (put_page_testzero(page))
1419                                        goto free_it;
1420                                else {
1421                                        /*
1422                                         * rare race with speculative reference.
1423                                         * the speculative reference will free
1424                                         * this page shortly, so we may
1425                                         * increment nr_reclaimed here (and
1426                                         * leave it off the LRU).
1427                                         */
1428                                        nr_reclaimed++;
1429                                        continue;
1430                                }
1431                        }
1432                }
1433
1434                if (PageAnon(page) && !PageSwapBacked(page)) {
1435                        /* follow __remove_mapping for reference */
1436                        if (!page_ref_freeze(page, 1))
1437                                goto keep_locked;
1438                        if (PageDirty(page)) {
1439                                page_ref_unfreeze(page, 1);
1440                                goto keep_locked;
1441                        }
1442
1443                        count_vm_event(PGLAZYFREED);
1444                        count_memcg_page_event(page, PGLAZYFREED);
1445                } else if (!mapping || !__remove_mapping(mapping, page, true))
1446                        goto keep_locked;
1447
1448                unlock_page(page);
1449free_it:
1450                nr_reclaimed++;
1451
1452                /*
1453                 * Is there need to periodically free_page_list? It would
1454                 * appear not as the counts should be low
1455                 */
1456                if (unlikely(PageTransHuge(page))) {
1457                        mem_cgroup_uncharge(page);
1458                        (*get_compound_page_dtor(page))(page);
1459                } else
1460                        list_add(&page->lru, &free_pages);
1461                continue;
1462
1463activate_locked:
1464                /* Not a candidate for swapping, so reclaim swap space. */
1465                if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
1466                                                PageMlocked(page)))
1467                        try_to_free_swap(page);
1468                VM_BUG_ON_PAGE(PageActive(page), page);
1469                if (!PageMlocked(page)) {
1470                        int type = page_is_file_cache(page);
1471                        SetPageActive(page);
1472                        pgactivate++;
1473                        stat->nr_activate[type] += hpage_nr_pages(page);
1474                        count_memcg_page_event(page, PGACTIVATE);
1475                }
1476keep_locked:
1477                unlock_page(page);
1478keep:
1479                list_add(&page->lru, &ret_pages);
1480                VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1481        }
1482
1483        mem_cgroup_uncharge_list(&free_pages);
1484        try_to_unmap_flush();
1485        free_unref_page_list(&free_pages);
1486
1487        list_splice(&ret_pages, page_list);
1488        count_vm_events(PGACTIVATE, pgactivate);
1489
1490        return nr_reclaimed;
1491}
1492
1493unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1494                                            struct list_head *page_list)
1495{
1496        struct scan_control sc = {
1497                .gfp_mask = GFP_KERNEL,
1498                .priority = DEF_PRIORITY,
1499                .may_unmap = 1,
1500        };
1501        struct reclaim_stat dummy_stat;
1502        unsigned long ret;
1503        struct page *page, *next;
1504        LIST_HEAD(clean_pages);
1505
1506        list_for_each_entry_safe(page, next, page_list, lru) {
1507                if (page_is_file_cache(page) && !PageDirty(page) &&
1508                    !__PageMovable(page) && !PageUnevictable(page)) {
1509                        ClearPageActive(page);
1510                        list_move(&page->lru, &clean_pages);
1511                }
1512        }
1513
1514        ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
1515                        TTU_IGNORE_ACCESS, &dummy_stat, true);
1516        list_splice(&clean_pages, page_list);
1517        mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
1518        return ret;
1519}
1520
1521/*
1522 * Attempt to remove the specified page from its LRU.  Only take this page
1523 * if it is of the appropriate PageActive status.  Pages which are being
1524 * freed elsewhere are also ignored.
1525 *
1526 * page:        page to consider
1527 * mode:        one of the LRU isolation modes defined above
1528 *
1529 * returns 0 on success, -ve errno on failure.
1530 */
1531int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1532{
1533        int ret = -EINVAL;
1534
1535        /* Only take pages on the LRU. */
1536        if (!PageLRU(page))
1537                return ret;
1538
1539        /* Compaction should not handle unevictable pages but CMA can do so */
1540        if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1541                return ret;
1542
1543        ret = -EBUSY;
1544
1545        /*
1546         * To minimise LRU disruption, the caller can indicate that it only
1547         * wants to isolate pages it will be able to operate on without
1548         * blocking - clean pages for the most part.
1549         *
1550         * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
1551         * that it is possible to migrate without blocking
1552         */
1553        if (mode & ISOLATE_ASYNC_MIGRATE) {
1554                /* All the caller can do on PageWriteback is block */
1555                if (PageWriteback(page))
1556                        return ret;
1557
1558                if (PageDirty(page)) {
1559                        struct address_space *mapping;
1560                        bool migrate_dirty;
1561
1562                        /*
1563                         * Only pages without mappings or that have a
1564                         * ->migratepage callback are possible to migrate
1565                         * without blocking. However, we can be racing with
1566                         * truncation so it's necessary to lock the page
1567                         * to stabilise the mapping as truncation holds
1568                         * the page lock until after the page is removed
1569                         * from the page cache.
1570                         */
1571                        if (!trylock_page(page))
1572                                return ret;
1573
1574                        mapping = page_mapping(page);
1575                        migrate_dirty = !mapping || mapping->a_ops->migratepage;
1576                        unlock_page(page);
1577                        if (!migrate_dirty)
1578                                return ret;
1579                }
1580        }
1581
1582        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1583                return ret;
1584
1585        if (likely(get_page_unless_zero(page))) {
1586                /*
1587                 * Be careful not to clear PageLRU until after we're
1588                 * sure the page is not being freed elsewhere -- the
1589                 * page release code relies on it.
1590                 */
1591                ClearPageLRU(page);
1592                ret = 0;
1593        }
1594
1595        return ret;
1596}
1597
1598
1599/*
1600 * Update LRU sizes after isolating pages. The LRU size updates must
1601 * be complete before mem_cgroup_update_lru_size due to a santity check.
1602 */
1603static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1604                        enum lru_list lru, unsigned long *nr_zone_taken)
1605{
1606        int zid;
1607
1608        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1609                if (!nr_zone_taken[zid])
1610                        continue;
1611
1612                __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1613#ifdef CONFIG_MEMCG
1614                mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1615#endif
1616        }
1617
1618}
1619
1620/**
1621 * pgdat->lru_lock is heavily contended.  Some of the functions that
1622 * shrink the lists perform better by taking out a batch of pages
1623 * and working on them outside the LRU lock.
1624 *
1625 * For pagecache intensive workloads, this function is the hottest
1626 * spot in the kernel (apart from copy_*_user functions).
1627 *
1628 * Appropriate locks must be held before calling this function.
1629 *
1630 * @nr_to_scan: The number of eligible pages to look through on the list.
1631 * @lruvec:     The LRU vector to pull pages from.
1632 * @dst:        The temp list to put pages on to.
1633 * @nr_scanned: The number of pages that were scanned.
1634 * @sc:         The scan_control struct for this reclaim session
1635 * @mode:       One of the LRU isolation modes
1636 * @lru:        LRU list id for isolating
1637 *
1638 * returns how many pages were moved onto *@dst.
1639 */
1640static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1641                struct lruvec *lruvec, struct list_head *dst,
1642                unsigned long *nr_scanned, struct scan_control *sc,
1643                enum lru_list lru)
1644{
1645        struct list_head *src = &lruvec->lists[lru];
1646        unsigned long nr_taken = 0;
1647        unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
1648        unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
1649        unsigned long skipped = 0;
1650        unsigned long scan, total_scan, nr_pages;
1651        LIST_HEAD(pages_skipped);
1652        isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
1653
1654        scan = 0;
1655        for (total_scan = 0;
1656             scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
1657             total_scan++) {
1658                struct page *page;
1659
1660                page = lru_to_page(src);
1661                prefetchw_prev_lru_page(page, src, flags);
1662
1663                VM_BUG_ON_PAGE(!PageLRU(page), page);
1664
1665                if (page_zonenum(page) > sc->reclaim_idx) {
1666                        list_move(&page->lru, &pages_skipped);
1667                        nr_skipped[page_zonenum(page)]++;
1668                        continue;
1669                }
1670
1671                /*
1672                 * Do not count skipped pages because that makes the function
1673                 * return with no isolated pages if the LRU mostly contains
1674                 * ineligible pages.  This causes the VM to not reclaim any
1675                 * pages, triggering a premature OOM.
1676                 */
1677                scan++;
1678                switch (__isolate_lru_page(page, mode)) {
1679                case 0:
1680                        nr_pages = hpage_nr_pages(page);
1681                        nr_taken += nr_pages;
1682                        nr_zone_taken[page_zonenum(page)] += nr_pages;
1683                        list_move(&page->lru, dst);
1684                        break;
1685
1686                case -EBUSY:
1687                        /* else it is being freed elsewhere */
1688                        list_move(&page->lru, src);
1689                        continue;
1690
1691                default:
1692                        BUG();
1693                }
1694        }
1695
1696        /*
1697         * Splice any skipped pages to the start of the LRU list. Note that
1698         * this disrupts the LRU order when reclaiming for lower zones but
1699         * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
1700         * scanning would soon rescan the same pages to skip and put the
1701         * system at risk of premature OOM.
1702         */
1703        if (!list_empty(&pages_skipped)) {
1704                int zid;
1705
1706                list_splice(&pages_skipped, src);
1707                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1708                        if (!nr_skipped[zid])
1709                                continue;
1710
1711                        __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
1712                        skipped += nr_skipped[zid];
1713                }
1714        }
1715        *nr_scanned = total_scan;
1716        trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
1717                                    total_scan, skipped, nr_taken, mode, lru);
1718        update_lru_sizes(lruvec, lru, nr_zone_taken);
1719        return nr_taken;
1720}
1721
1722/**
1723 * isolate_lru_page - tries to isolate a page from its LRU list
1724 * @page: page to isolate from its LRU list
1725 *
1726 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1727 * vmstat statistic corresponding to whatever LRU list the page was on.
1728 *
1729 * Returns 0 if the page was removed from an LRU list.
1730 * Returns -EBUSY if the page was not on an LRU list.
1731 *
1732 * The returned page will have PageLRU() cleared.  If it was found on
1733 * the active list, it will have PageActive set.  If it was found on
1734 * the unevictable list, it will have the PageUnevictable bit set. That flag
1735 * may need to be cleared by the caller before letting the page go.
1736 *
1737 * The vmstat statistic corresponding to the list on which the page was
1738 * found will be decremented.
1739 *
1740 * Restrictions:
1741 *
1742 * (1) Must be called with an elevated refcount on the page. This is a
1743 *     fundamentnal difference from isolate_lru_pages (which is called
1744 *     without a stable reference).
1745 * (2) the lru_lock must not be held.
1746 * (3) interrupts must be enabled.
1747 */
1748int isolate_lru_page(struct page *page)
1749{
1750        int ret = -EBUSY;
1751
1752        VM_BUG_ON_PAGE(!page_count(page), page);
1753        WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
1754
1755        if (PageLRU(page)) {
1756                pg_data_t *pgdat = page_pgdat(page);
1757                struct lruvec *lruvec;
1758
1759                spin_lock_irq(&pgdat->lru_lock);
1760                lruvec = mem_cgroup_page_lruvec(page, pgdat);
1761                if (PageLRU(page)) {
1762                        int lru = page_lru(page);
1763                        get_page(page);
1764                        ClearPageLRU(page);
1765                        del_page_from_lru_list(page, lruvec, lru);
1766                        ret = 0;
1767                }
1768                spin_unlock_irq(&pgdat->lru_lock);
1769        }
1770        return ret;
1771}
1772
1773/*
1774 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
1775 * then get resheduled. When there are massive number of tasks doing page
1776 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
1777 * the LRU list will go small and be scanned faster than necessary, leading to
1778 * unnecessary swapping, thrashing and OOM.
1779 */
1780static int too_many_isolated(struct pglist_data *pgdat, int file,
1781                struct scan_control *sc)
1782{
1783        unsigned long inactive, isolated;
1784
1785        if (current_is_kswapd())
1786                return 0;
1787
1788        if (!sane_reclaim(sc))
1789                return 0;
1790
1791        if (file) {
1792                inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1793                isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1794        } else {
1795                inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1796                isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1797        }
1798
1799        /*
1800         * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1801         * won't get blocked by normal direct-reclaimers, forming a circular
1802         * deadlock.
1803         */
1804        if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
1805                inactive >>= 3;
1806
1807        return isolated > inactive;
1808}
1809
1810/*
1811 * This moves pages from @list to corresponding LRU list.
1812 *
1813 * We move them the other way if the page is referenced by one or more
1814 * processes, from rmap.
1815 *
1816 * If the pages are mostly unmapped, the processing is fast and it is
1817 * appropriate to hold zone_lru_lock across the whole operation.  But if
1818 * the pages are mapped, the processing is slow (page_referenced()) so we
1819 * should drop zone_lru_lock around each page.  It's impossible to balance
1820 * this, so instead we remove the pages from the LRU while processing them.
1821 * It is safe to rely on PG_active against the non-LRU pages in here because
1822 * nobody will play with that bit on a non-LRU page.
1823 *
1824 * The downside is that we have to touch page->_refcount against each page.
1825 * But we had to alter page->flags anyway.
1826 *
1827 * Returns the number of pages moved to the given lruvec.
1828 */
1829
1830static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
1831                                                     struct list_head *list)
1832{
1833        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1834        int nr_pages, nr_moved = 0;
1835        LIST_HEAD(pages_to_free);
1836        struct page *page;
1837        enum lru_list lru;
1838
1839        while (!list_empty(list)) {
1840                page = lru_to_page(list);
1841                VM_BUG_ON_PAGE(PageLRU(page), page);
1842                if (unlikely(!page_evictable(page))) {
1843                        list_del(&page->lru);
1844                        spin_unlock_irq(&pgdat->lru_lock);
1845                        putback_lru_page(page);
1846                        spin_lock_irq(&pgdat->lru_lock);
1847                        continue;
1848                }
1849                lruvec = mem_cgroup_page_lruvec(page, pgdat);
1850
1851                SetPageLRU(page);
1852                lru = page_lru(page);
1853
1854                nr_pages = hpage_nr_pages(page);
1855                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1856                list_move(&page->lru, &lruvec->lists[lru]);
1857
1858                if (put_page_testzero(page)) {
1859                        __ClearPageLRU(page);
1860                        __ClearPageActive(page);
1861                        del_page_from_lru_list(page, lruvec, lru);
1862
1863                        if (unlikely(PageCompound(page))) {
1864                                spin_unlock_irq(&pgdat->lru_lock);
1865                                mem_cgroup_uncharge(page);
1866                                (*get_compound_page_dtor(page))(page);
1867                                spin_lock_irq(&pgdat->lru_lock);
1868                        } else
1869                                list_add(&page->lru, &pages_to_free);
1870                } else {
1871                        nr_moved += nr_pages;
1872                }
1873        }
1874
1875        /*
1876         * To save our caller's stack, now use input list for pages to free.
1877         */
1878        list_splice(&pages_to_free, list);
1879
1880        return nr_moved;
1881}
1882
1883/*
1884 * If a kernel thread (such as nfsd for loop-back mounts) services
1885 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1886 * In that case we should only throttle if the backing device it is
1887 * writing to is congested.  In other cases it is safe to throttle.
1888 */
1889static int current_may_throttle(void)
1890{
1891        return !(current->flags & PF_LESS_THROTTLE) ||
1892                current->backing_dev_info == NULL ||
1893                bdi_write_congested(current->backing_dev_info);
1894}
1895
1896/*
1897 * shrink_inactive_list() is a helper for shrink_node().  It returns the number
1898 * of reclaimed pages
1899 */
1900static noinline_for_stack unsigned long
1901shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1902                     struct scan_control *sc, enum lru_list lru)
1903{
1904        LIST_HEAD(page_list);
1905        unsigned long nr_scanned;
1906        unsigned long nr_reclaimed = 0;
1907        unsigned long nr_taken;
1908        struct reclaim_stat stat;
1909        int file = is_file_lru(lru);
1910        enum vm_event_item item;
1911        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1912        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1913        bool stalled = false;
1914
1915        while (unlikely(too_many_isolated(pgdat, file, sc))) {
1916                if (stalled)
1917                        return 0;
1918
1919                /* wait a bit for the reclaimer. */
1920                msleep(100);
1921                stalled = true;
1922
1923                /* We are about to die and free our memory. Return now. */
1924                if (fatal_signal_pending(current))
1925                        return SWAP_CLUSTER_MAX;
1926        }
1927
1928        lru_add_drain();
1929
1930        spin_lock_irq(&pgdat->lru_lock);
1931
1932        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1933                                     &nr_scanned, sc, lru);
1934
1935        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1936        reclaim_stat->recent_scanned[file] += nr_taken;
1937
1938        item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
1939        if (global_reclaim(sc))
1940                __count_vm_events(item, nr_scanned);
1941        __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1942        spin_unlock_irq(&pgdat->lru_lock);
1943
1944        if (nr_taken == 0)
1945                return 0;
1946
1947        nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
1948                                &stat, false);
1949
1950        spin_lock_irq(&pgdat->lru_lock);
1951
1952        item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
1953        if (global_reclaim(sc))
1954                __count_vm_events(item, nr_reclaimed);
1955        __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
1956        reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
1957        reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
1958
1959        move_pages_to_lru(lruvec, &page_list);
1960
1961        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1962
1963        spin_unlock_irq(&pgdat->lru_lock);
1964
1965        mem_cgroup_uncharge_list(&page_list);
1966        free_unref_page_list(&page_list);
1967
1968        /*
1969         * If dirty pages are scanned that are not queued for IO, it
1970         * implies that flushers are not doing their job. This can
1971         * happen when memory pressure pushes dirty pages to the end of
1972         * the LRU before the dirty limits are breached and the dirty
1973         * data has expired. It can also happen when the proportion of
1974         * dirty pages grows not through writes but through memory
1975         * pressure reclaiming all the clean cache. And in some cases,
1976         * the flushers simply cannot keep up with the allocation
1977         * rate. Nudge the flusher threads in case they are asleep.
1978         */
1979        if (stat.nr_unqueued_dirty == nr_taken)
1980                wakeup_flusher_threads(WB_REASON_VMSCAN);
1981
1982        sc->nr.dirty += stat.nr_dirty;
1983        sc->nr.congested += stat.nr_congested;
1984        sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
1985        sc->nr.writeback += stat.nr_writeback;
1986        sc->nr.immediate += stat.nr_immediate;
1987        sc->nr.taken += nr_taken;
1988        if (file)
1989                sc->nr.file_taken += nr_taken;
1990
1991        trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
1992                        nr_scanned, nr_reclaimed, &stat, sc->priority, file);
1993        return nr_reclaimed;
1994}
1995
1996static void shrink_active_list(unsigned long nr_to_scan,
1997                               struct lruvec *lruvec,
1998                               struct scan_control *sc,
1999                               enum lru_list lru)
2000{
2001        unsigned long nr_taken;
2002        unsigned long nr_scanned;
2003        unsigned long vm_flags;
2004        LIST_HEAD(l_hold);      /* The pages which were snipped off */
2005        LIST_HEAD(l_active);
2006        LIST_HEAD(l_inactive);
2007        struct page *page;
2008        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2009        unsigned nr_deactivate, nr_activate;
2010        unsigned nr_rotated = 0;
2011        int file = is_file_lru(lru);
2012        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2013
2014        lru_add_drain();
2015
2016        spin_lock_irq(&pgdat->lru_lock);
2017
2018        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2019                                     &nr_scanned, sc, lru);
2020
2021        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2022        reclaim_stat->recent_scanned[file] += nr_taken;
2023
2024        __count_vm_events(PGREFILL, nr_scanned);
2025        __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2026
2027        spin_unlock_irq(&pgdat->lru_lock);
2028
2029        while (!list_empty(&l_hold)) {
2030                cond_resched();
2031                page = lru_to_page(&l_hold);
2032                list_del(&page->lru);
2033
2034                if (unlikely(!page_evictable(page))) {
2035                        putback_lru_page(page);
2036                        continue;
2037                }
2038
2039                if (unlikely(buffer_heads_over_limit)) {
2040                        if (page_has_private(page) && trylock_page(page)) {
2041                                if (page_has_private(page))
2042                                        try_to_release_page(page, 0);
2043                                unlock_page(page);
2044                        }
2045                }
2046
2047                if (page_referenced(page, 0, sc->target_mem_cgroup,
2048                                    &vm_flags)) {
2049                        nr_rotated += hpage_nr_pages(page);
2050                        /*
2051                         * Identify referenced, file-backed active pages and
2052                         * give them one more trip around the active list. So
2053                         * that executable code get better chances to stay in
2054                         * memory under moderate memory pressure.  Anon pages
2055                         * are not likely to be evicted by use-once streaming
2056                         * IO, plus JVM can create lots of anon VM_EXEC pages,
2057                         * so we ignore them here.
2058                         */
2059                        if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
2060                                list_add(&page->lru, &l_active);
2061                                continue;
2062                        }
2063                }
2064
2065                ClearPageActive(page);  /* we are de-activating */
2066                SetPageWorkingset(page);
2067                list_add(&page->lru, &l_inactive);
2068        }
2069
2070        /*
2071         * Move pages back to the lru list.
2072         */
2073        spin_lock_irq(&pgdat->lru_lock);
2074        /*
2075         * Count referenced pages from currently used mappings as rotated,
2076         * even though only some of them are actually re-activated.  This
2077         * helps balance scan pressure between file and anonymous pages in
2078         * get_scan_count.
2079         */
2080        reclaim_stat->recent_rotated[file] += nr_rotated;
2081
2082        nr_activate = move_pages_to_lru(lruvec, &l_active);
2083        nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2084        /* Keep all free pages in l_active list */
2085        list_splice(&l_inactive, &l_active);
2086
2087        __count_vm_events(PGDEACTIVATE, nr_deactivate);
2088        __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2089
2090        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2091        spin_unlock_irq(&pgdat->lru_lock);
2092
2093        mem_cgroup_uncharge_list(&l_active);
2094        free_unref_page_list(&l_active);
2095        trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2096                        nr_deactivate, nr_rotated, sc->priority, file);
2097}
2098
2099/*
2100 * The inactive anon list should be small enough that the VM never has
2101 * to do too much work.
2102 *
2103 * The inactive file list should be small enough to leave most memory
2104 * to the established workingset on the scan-resistant active list,
2105 * but large enough to avoid thrashing the aggregate readahead window.
2106 *
2107 * Both inactive lists should also be large enough that each inactive
2108 * page has a chance to be referenced again before it is reclaimed.
2109 *
2110 * If that fails and refaulting is observed, the inactive list grows.
2111 *
2112 * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
2113 * on this LRU, maintained by the pageout code. An inactive_ratio
2114 * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
2115 *
2116 * total     target    max
2117 * memory    ratio     inactive
2118 * -------------------------------------
2119 *   10MB       1         5MB
2120 *  100MB       1        50MB
2121 *    1GB       3       250MB
2122 *   10GB      10       0.9GB
2123 *  100GB      31         3GB
2124 *    1TB     101        10GB
2125 *   10TB     320        32GB
2126 */
2127static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2128                                 struct scan_control *sc, bool actual_reclaim)
2129{
2130        enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
2131        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2132        enum lru_list inactive_lru = file * LRU_FILE;
2133        unsigned long inactive, active;
2134        unsigned long inactive_ratio;
2135        unsigned long refaults;
2136        unsigned long gb;
2137
2138        /*
2139         * If we don't have swap space, anonymous page deactivation
2140         * is pointless.
2141         */
2142        if (!file && !total_swap_pages)
2143                return false;
2144
2145        inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
2146        active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
2147
2148        /*
2149         * When refaults are being observed, it means a new workingset
2150         * is being established. Disable active list protection to get
2151         * rid of the stale workingset quickly.
2152         */
2153        refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
2154        if (file && actual_reclaim && lruvec->refaults != refaults) {
2155                inactive_ratio = 0;
2156        } else {
2157                gb = (inactive + active) >> (30 - PAGE_SHIFT);
2158                if (gb)
2159                        inactive_ratio = int_sqrt(10 * gb);
2160                else
2161                        inactive_ratio = 1;
2162        }
2163
2164        if (actual_reclaim)
2165                trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
2166                        lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
2167                        lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
2168                        inactive_ratio, file);
2169
2170        return inactive * inactive_ratio < active;
2171}
2172
2173static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
2174                                 struct lruvec *lruvec, struct scan_control *sc)
2175{
2176        if (is_active_lru(lru)) {
2177                if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
2178                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
2179                return 0;
2180        }
2181
2182        return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2183}
2184
2185enum scan_balance {
2186        SCAN_EQUAL,
2187        SCAN_FRACT,
2188        SCAN_ANON,
2189        SCAN_FILE,
2190};
2191
2192/*
2193 * Determine how aggressively the anon and file LRU lists should be
2194 * scanned.  The relative value of each set of LRU lists is determined
2195 * by looking at the fraction of the pages scanned we did rotate back
2196 * onto the active list instead of evict.
2197 *
2198 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
2199 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
2200 */
2201static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
2202                           struct scan_control *sc, unsigned long *nr,
2203                           unsigned long *lru_pages)
2204{
2205        int swappiness = mem_cgroup_swappiness(memcg);
2206        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
2207        u64 fraction[2];
2208        u64 denominator = 0;    /* gcc */
2209        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2210        unsigned long anon_prio, file_prio;
2211        enum scan_balance scan_balance;
2212        unsigned long anon, file;
2213        unsigned long ap, fp;
2214        enum lru_list lru;
2215
2216        /* If we have no swap space, do not bother scanning anon pages. */
2217        if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
2218                scan_balance = SCAN_FILE;
2219                goto out;
2220        }
2221
2222        /*
2223         * Global reclaim will swap to prevent OOM even with no
2224         * swappiness, but memcg users want to use this knob to
2225         * disable swapping for individual groups completely when
2226         * using the memory controller's swap limit feature would be
2227         * too expensive.
2228         */
2229        if (!global_reclaim(sc) && !swappiness) {
2230                scan_balance = SCAN_FILE;
2231                goto out;
2232        }
2233
2234        /*
2235         * Do not apply any pressure balancing cleverness when the
2236         * system is close to OOM, scan both anon and file equally
2237         * (unless the swappiness setting disagrees with swapping).
2238         */
2239        if (!sc->priority && swappiness) {
2240                scan_balance = SCAN_EQUAL;
2241                goto out;
2242        }
2243
2244        /*
2245         * Prevent the reclaimer from falling into the cache trap: as
2246         * cache pages start out inactive, every cache fault will tip
2247         * the scan balance towards the file LRU.  And as the file LRU
2248         * shrinks, so does the window for rotation from references.
2249         * This means we have a runaway feedback loop where a tiny
2250         * thrashing file LRU becomes infinitely more attractive than
2251         * anon pages.  Try to detect this based on file LRU size.
2252         */
2253        if (global_reclaim(sc)) {
2254                unsigned long pgdatfile;
2255                unsigned long pgdatfree;
2256                int z;
2257                unsigned long total_high_wmark = 0;
2258
2259                pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2260                pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
2261                           node_page_state(pgdat, NR_INACTIVE_FILE);
2262
2263                for (z = 0; z < MAX_NR_ZONES; z++) {
2264                        struct zone *zone = &pgdat->node_zones[z];
2265                        if (!managed_zone(zone))
2266                                continue;
2267
2268                        total_high_wmark += high_wmark_pages(zone);
2269                }
2270
2271                if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
2272                        /*
2273                         * Force SCAN_ANON if there are enough inactive
2274                         * anonymous pages on the LRU in eligible zones.
2275                         * Otherwise, the small LRU gets thrashed.
2276                         */
2277                        if (!inactive_list_is_low(lruvec, false, sc, false) &&
2278                            lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
2279                                        >> sc->priority) {
2280                                scan_balance = SCAN_ANON;
2281                                goto out;
2282                        }
2283                }
2284        }
2285
2286        /*
2287         * If there is enough inactive page cache, i.e. if the size of the
2288         * inactive list is greater than that of the active list *and* the
2289         * inactive list actually has some pages to scan on this priority, we
2290         * do not reclaim anything from the anonymous working set right now.
2291         * Without the second condition we could end up never scanning an
2292         * lruvec even if it has plenty of old anonymous pages unless the
2293         * system is under heavy pressure.
2294         */
2295        if (!inactive_list_is_low(lruvec, true, sc, false) &&
2296            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
2297                scan_balance = SCAN_FILE;
2298                goto out;
2299        }
2300
2301        scan_balance = SCAN_FRACT;
2302
2303        /*
2304         * With swappiness at 100, anonymous and file have the same priority.
2305         * This scanning priority is essentially the inverse of IO cost.
2306         */
2307        anon_prio = swappiness;
2308        file_prio = 200 - anon_prio;
2309
2310        /*
2311         * OK, so we have swap space and a fair amount of page cache
2312         * pages.  We use the recently rotated / recently scanned
2313         * ratios to determine how valuable each cache is.
2314         *
2315         * Because workloads change over time (and to avoid overflow)
2316         * we keep these statistics as a floating average, which ends
2317         * up weighing recent references more than old ones.
2318         *
2319         * anon in [0], file in [1]
2320         */
2321
2322        anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
2323                lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
2324        file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
2325                lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
2326
2327        spin_lock_irq(&pgdat->lru_lock);
2328        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2329                reclaim_stat->recent_scanned[0] /= 2;
2330                reclaim_stat->recent_rotated[0] /= 2;
2331        }
2332
2333        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2334                reclaim_stat->recent_scanned[1] /= 2;
2335                reclaim_stat->recent_rotated[1] /= 2;
2336        }
2337
2338        /*
2339         * The amount of pressure on anon vs file pages is inversely
2340         * proportional to the fraction of recently scanned pages on
2341         * each list that were recently referenced and in active use.
2342         */
2343        ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2344        ap /= reclaim_stat->recent_rotated[0] + 1;
2345
2346        fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2347        fp /= reclaim_stat->recent_rotated[1] + 1;
2348        spin_unlock_irq(&pgdat->lru_lock);
2349
2350        fraction[0] = ap;
2351        fraction[1] = fp;
2352        denominator = ap + fp + 1;
2353out:
2354        *lru_pages = 0;
2355        for_each_evictable_lru(lru) {
2356                int file = is_file_lru(lru);
2357                unsigned long size;
2358                unsigned long scan;
2359
2360                size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2361                scan = size >> sc->priority;
2362                /*
2363                 * If the cgroup's already been deleted, make sure to
2364                 * scrape out the remaining cache.
2365                 */
2366                if (!scan && !mem_cgroup_online(memcg))
2367                        scan = min(size, SWAP_CLUSTER_MAX);
2368
2369                switch (scan_balance) {
2370                case SCAN_EQUAL:
2371                        /* Scan lists relative to size */
2372                        break;
2373                case SCAN_FRACT:
2374                        /*
2375                         * Scan types proportional to swappiness and
2376                         * their relative recent reclaim efficiency.
2377                         * Make sure we don't miss the last page
2378                         * because of a round-off error.
2379                         */
2380                        scan = DIV64_U64_ROUND_UP(scan * fraction[file],
2381                                                  denominator);
2382                        break;
2383                case SCAN_FILE:
2384                case SCAN_ANON:
2385                        /* Scan one type exclusively */
2386                        if ((scan_balance == SCAN_FILE) != file) {
2387                                size = 0;
2388                                scan = 0;
2389                        }
2390                        break;
2391                default:
2392                        /* Look ma, no brain */
2393                        BUG();
2394                }
2395
2396                *lru_pages += size;
2397                nr[lru] = scan;
2398        }
2399}
2400
2401/*
2402 * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
2403 */
2404static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
2405                              struct scan_control *sc, unsigned long *lru_pages)
2406{
2407        struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
2408        unsigned long nr[NR_LRU_LISTS];
2409        unsigned long targets[NR_LRU_LISTS];
2410        unsigned long nr_to_scan;
2411        enum lru_list lru;
2412        unsigned long nr_reclaimed = 0;
2413        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2414        struct blk_plug plug;
2415        bool scan_adjusted;
2416
2417        get_scan_count(lruvec, memcg, sc, nr, lru_pages);
2418
2419        /* Record the original scan target for proportional adjustments later */
2420        memcpy(targets, nr, sizeof(nr));
2421
2422        /*
2423         * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2424         * event that can occur when there is little memory pressure e.g.
2425         * multiple streaming readers/writers. Hence, we do not abort scanning
2426         * when the requested number of pages are reclaimed when scanning at
2427         * DEF_PRIORITY on the assumption that the fact we are direct
2428         * reclaiming implies that kswapd is not keeping up and it is best to
2429         * do a batch of work at once. For memcg reclaim one check is made to
2430         * abort proportional reclaim if either the file or anon lru has already
2431         * dropped to zero at the first pass.
2432         */
2433        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2434                         sc->priority == DEF_PRIORITY);
2435
2436        blk_start_plug(&plug);
2437        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2438                                        nr[LRU_INACTIVE_FILE]) {
2439                unsigned long nr_anon, nr_file, percentage;
2440                unsigned long nr_scanned;
2441
2442                for_each_evictable_lru(lru) {
2443                        if (nr[lru]) {
2444                                nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2445                                nr[lru] -= nr_to_scan;
2446
2447                                nr_reclaimed += shrink_list(lru, nr_to_scan,
2448                                                            lruvec, sc);
2449                        }
2450                }
2451
2452                cond_resched();
2453
2454                if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2455                        continue;
2456
2457                /*
2458                 * For kswapd and memcg, reclaim at least the number of pages
2459                 * requested. Ensure that the anon and file LRUs are scanned
2460                 * proportionally what was requested by get_scan_count(). We
2461                 * stop reclaiming one LRU and reduce the amount scanning
2462                 * proportional to the original scan target.
2463                 */
2464                nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2465                nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2466
2467                /*
2468                 * It's just vindictive to attack the larger once the smaller
2469                 * has gone to zero.  And given the way we stop scanning the
2470                 * smaller below, this makes sure that we only make one nudge
2471                 * towards proportionality once we've got nr_to_reclaim.
2472                 */
2473                if (!nr_file || !nr_anon)
2474                        break;
2475
2476                if (nr_file > nr_anon) {
2477                        unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2478                                                targets[LRU_ACTIVE_ANON] + 1;
2479                        lru = LRU_BASE;
2480                        percentage = nr_anon * 100 / scan_target;
2481                } else {
2482                        unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2483                                                targets[LRU_ACTIVE_FILE] + 1;
2484                        lru = LRU_FILE;
2485                        percentage = nr_file * 100 / scan_target;
2486                }
2487
2488                /* Stop scanning the smaller of the LRU */
2489                nr[lru] = 0;
2490                nr[lru + LRU_ACTIVE] = 0;
2491
2492                /*
2493                 * Recalculate the other LRU scan count based on its original
2494                 * scan target and the percentage scanning already complete
2495                 */
2496                lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2497                nr_scanned = targets[lru] - nr[lru];
2498                nr[lru] = targets[lru] * (100 - percentage) / 100;
2499                nr[lru] -= min(nr[lru], nr_scanned);
2500
2501                lru += LRU_ACTIVE;
2502                nr_scanned = targets[lru] - nr[lru];
2503                nr[lru] = targets[lru] * (100 - percentage) / 100;
2504                nr[lru] -= min(nr[lru], nr_scanned);
2505
2506                scan_adjusted = true;
2507        }
2508        blk_finish_plug(&plug);
2509        sc->nr_reclaimed += nr_reclaimed;
2510
2511        /*
2512         * Even if we did not try to evict anon pages at all, we want to
2513         * rebalance the anon lru active/inactive ratio.
2514         */
2515        if (inactive_list_is_low(lruvec, false, sc, true))
2516                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2517                                   sc, LRU_ACTIVE_ANON);
2518}
2519
2520/* Use reclaim/compaction for costly allocs or under memory pressure */
2521static bool in_reclaim_compaction(struct scan_control *sc)
2522{
2523        if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2524                        (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2525                         sc->priority < DEF_PRIORITY - 2))
2526                return true;
2527
2528        return false;
2529}
2530
2531/*
2532 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2533 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2534 * true if more pages should be reclaimed such that when the page allocator
2535 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2536 * It will give up earlier than that if there is difficulty reclaiming pages.
2537 */
2538static inline bool should_continue_reclaim(struct pglist_data *pgdat,
2539                                        unsigned long nr_reclaimed,
2540                                        unsigned long nr_scanned,
2541                                        struct scan_control *sc)
2542{
2543        unsigned long pages_for_compaction;
2544        unsigned long inactive_lru_pages;
2545        int z;
2546
2547        /* If not in reclaim/compaction mode, stop */
2548        if (!in_reclaim_compaction(sc))
2549                return false;
2550
2551        /* Consider stopping depending on scan and reclaim activity */
2552        if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
2553                /*
2554                 * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
2555                 * full LRU list has been scanned and we are still failing
2556                 * to reclaim pages. This full LRU scan is potentially
2557                 * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
2558                 */
2559                if (!nr_reclaimed && !nr_scanned)
2560                        return false;
2561        } else {
2562                /*
2563                 * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
2564                 * fail without consequence, stop if we failed to reclaim
2565                 * any pages from the last SWAP_CLUSTER_MAX number of
2566                 * pages that were scanned. This will return to the
2567                 * caller faster at the risk reclaim/compaction and
2568                 * the resulting allocation attempt fails
2569                 */
2570                if (!nr_reclaimed)
2571                        return false;
2572        }
2573
2574        /*
2575         * If we have not reclaimed enough pages for compaction and the
2576         * inactive lists are large enough, continue reclaiming
2577         */
2578        pages_for_compaction = compact_gap(sc->order);
2579        inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
2580        if (get_nr_swap_pages() > 0)
2581                inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
2582        if (sc->nr_reclaimed < pages_for_compaction &&
2583                        inactive_lru_pages > pages_for_compaction)
2584                return true;
2585
2586        /* If compaction would go ahead or the allocation would succeed, stop */
2587        for (z = 0; z <= sc->reclaim_idx; z++) {
2588                struct zone *zone = &pgdat->node_zones[z];
2589                if (!managed_zone(zone))
2590                        continue;
2591
2592                switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
2593                case COMPACT_SUCCESS:
2594                case COMPACT_CONTINUE:
2595                        return false;
2596                default:
2597                        /* check next zone */
2598                        ;
2599                }
2600        }
2601        return true;
2602}
2603
2604static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
2605{
2606        return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
2607                (memcg && memcg_congested(pgdat, memcg));
2608}
2609
2610static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2611{
2612        struct reclaim_state *reclaim_state = current->reclaim_state;
2613        unsigned long nr_reclaimed, nr_scanned;
2614        bool reclaimable = false;
2615
2616        do {
2617                struct mem_cgroup *root = sc->target_mem_cgroup;
2618                struct mem_cgroup_reclaim_cookie reclaim = {
2619                        .pgdat = pgdat,
2620                        .priority = sc->priority,
2621                };
2622                unsigned long node_lru_pages = 0;
2623                struct mem_cgroup *memcg;
2624
2625                memset(&sc->nr, 0, sizeof(sc->nr));
2626
2627                nr_reclaimed = sc->nr_reclaimed;
2628                nr_scanned = sc->nr_scanned;
2629
2630                memcg = mem_cgroup_iter(root, NULL, &reclaim);
2631                do {
2632                        unsigned long lru_pages;
2633                        unsigned long reclaimed;
2634                        unsigned long scanned;
2635
2636                        switch (mem_cgroup_protected(root, memcg)) {
2637                        case MEMCG_PROT_MIN:
2638                                /*
2639                                 * Hard protection.
2640                                 * If there is no reclaimable memory, OOM.
2641                                 */
2642                                continue;
2643                        case MEMCG_PROT_LOW:
2644                                /*
2645                                 * Soft protection.
2646                                 * Respect the protection only as long as
2647                                 * there is an unprotected supply
2648                                 * of reclaimable memory from other cgroups.
2649                                 */
2650                                if (!sc->memcg_low_reclaim) {
2651                                        sc->memcg_low_skipped = 1;
2652                                        continue;
2653                                }
2654                                memcg_memory_event(memcg, MEMCG_LOW);
2655                                break;
2656                        case MEMCG_PROT_NONE:
2657                                break;
2658                        }
2659
2660                        reclaimed = sc->nr_reclaimed;
2661                        scanned = sc->nr_scanned;
2662                        shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2663                        node_lru_pages += lru_pages;
2664
2665                        if (sc->may_shrinkslab) {
2666                                shrink_slab(sc->gfp_mask, pgdat->node_id,
2667                                    memcg, sc->priority);
2668                        }
2669
2670                        /* Record the group's reclaim efficiency */
2671                        vmpressure(sc->gfp_mask, memcg, false,
2672                                   sc->nr_scanned - scanned,
2673                                   sc->nr_reclaimed - reclaimed);
2674
2675                        /*
2676                         * Kswapd have to scan all memory cgroups to fulfill
2677                         * the overall scan target for the node.
2678                         *
2679                         * Limit reclaim, on the other hand, only cares about
2680                         * nr_to_reclaim pages to be reclaimed and it will
2681                         * retry with decreasing priority if one round over the
2682                         * whole hierarchy is not sufficient.
2683                         */
2684                        if (!current_is_kswapd() &&
2685                                        sc->nr_reclaimed >= sc->nr_to_reclaim) {
2686                                mem_cgroup_iter_break(root, memcg);
2687                                break;
2688                        }
2689                } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
2690
2691                if (reclaim_state) {
2692                        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2693                        reclaim_state->reclaimed_slab = 0;
2694                }
2695
2696                /* Record the subtree's reclaim efficiency */
2697                vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
2698                           sc->nr_scanned - nr_scanned,
2699                           sc->nr_reclaimed - nr_reclaimed);
2700
2701                if (sc->nr_reclaimed - nr_reclaimed)
2702                        reclaimable = true;
2703
2704                if (current_is_kswapd()) {
2705                        /*
2706                         * If reclaim is isolating dirty pages under writeback,
2707                         * it implies that the long-lived page allocation rate
2708                         * is exceeding the page laundering rate. Either the
2709                         * global limits are not being effective at throttling
2710                         * processes due to the page distribution throughout
2711                         * zones or there is heavy usage of a slow backing
2712                         * device. The only option is to throttle from reclaim
2713                         * context which is not ideal as there is no guarantee
2714                         * the dirtying process is throttled in the same way
2715                         * balance_dirty_pages() manages.
2716                         *
2717                         * Once a node is flagged PGDAT_WRITEBACK, kswapd will
2718                         * count the number of pages under pages flagged for
2719                         * immediate reclaim and stall if any are encountered
2720                         * in the nr_immediate check below.
2721                         */
2722                        if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
2723                                set_bit(PGDAT_WRITEBACK, &pgdat->flags);
2724
2725                        /*
2726                         * Tag a node as congested if all the dirty pages
2727                         * scanned were backed by a congested BDI and
2728                         * wait_iff_congested will stall.
2729                         */
2730                        if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2731                                set_bit(PGDAT_CONGESTED, &pgdat->flags);
2732
2733                        /* Allow kswapd to start writing pages during reclaim.*/
2734                        if (sc->nr.unqueued_dirty == sc->nr.file_taken)
2735                                set_bit(PGDAT_DIRTY, &pgdat->flags);
2736
2737                        /*
2738                         * If kswapd scans pages marked marked for immediate
2739                         * reclaim and under writeback (nr_immediate), it
2740                         * implies that pages are cycling through the LRU
2741                         * faster than they are written so also forcibly stall.
2742                         */
2743                        if (sc->nr.immediate)
2744                                congestion_wait(BLK_RW_ASYNC, HZ/10);
2745                }
2746
2747                /*
2748                 * Legacy memcg will stall in page writeback so avoid forcibly
2749                 * stalling in wait_iff_congested().
2750                 */
2751                if (!global_reclaim(sc) && sane_reclaim(sc) &&
2752                    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
2753                        set_memcg_congestion(pgdat, root, true);
2754
2755                /*
2756                 * Stall direct reclaim for IO completions if underlying BDIs
2757                 * and node is congested. Allow kswapd to continue until it
2758                 * starts encountering unqueued dirty pages or cycling through
2759                 * the LRU too quickly.
2760                 */
2761                if (!sc->hibernation_mode && !current_is_kswapd() &&
2762                   current_may_throttle() && pgdat_memcg_congested(pgdat, root))
2763                        wait_iff_congested(BLK_RW_ASYNC, HZ/10);
2764
2765        } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
2766                                         sc->nr_scanned - nr_scanned, sc));
2767
2768        /*
2769         * Kswapd gives up on balancing particular nodes after too
2770         * many failures to reclaim anything from them and goes to
2771         * sleep. On reclaim progress, reset the failure counter. A
2772         * successful direct reclaim run will revive a dormant kswapd.
2773         */
2774        if (reclaimable)
2775                pgdat->kswapd_failures = 0;
2776
2777        return reclaimable;
2778}
2779
2780/*
2781 * Returns true if compaction should go ahead for a costly-order request, or
2782 * the allocation would already succeed without compaction. Return false if we
2783 * should reclaim first.
2784 */
2785static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2786{
2787        unsigned long watermark;
2788        enum compact_result suitable;
2789
2790        suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
2791        if (suitable == COMPACT_SUCCESS)
2792                /* Allocation should succeed already. Don't reclaim. */
2793                return true;
2794        if (suitable == COMPACT_SKIPPED)
2795                /* Compaction cannot yet proceed. Do reclaim. */
2796                return false;
2797
2798        /*
2799         * Compaction is already possible, but it takes time to run and there
2800         * are potentially other callers using the pages just freed. So proceed
2801         * with reclaim to make a buffer of free pages available to give
2802         * compaction a reasonable chance of completing and allocating the page.
2803         * Note that we won't actually reclaim the whole buffer in one attempt
2804         * as the target watermark in should_continue_reclaim() is lower. But if
2805         * we are already above the high+gap watermark, don't reclaim at all.
2806         */
2807        watermark = high_wmark_pages(zone) + compact_gap(sc->order);
2808
2809        return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
2810}
2811
2812/*
2813 * This is the direct reclaim path, for page-allocating processes.  We only
2814 * try to reclaim pages from zones which will satisfy the caller's allocation
2815 * request.
2816 *
2817 * If a zone is deemed to be full of pinned pages then just give it a light
2818 * scan then give up on it.
2819 */
2820static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2821{
2822        struct zoneref *z;
2823        struct zone *zone;
2824        unsigned long nr_soft_reclaimed;
2825        unsigned long nr_soft_scanned;
2826        gfp_t orig_mask;
2827        pg_data_t *last_pgdat = NULL;
2828
2829        /*
2830         * If the number of buffer_heads in the machine exceeds the maximum
2831         * allowed level, force direct reclaim to scan the highmem zone as
2832         * highmem pages could be pinning lowmem pages storing buffer_heads
2833         */
2834        orig_mask = sc->gfp_mask;
2835        if (buffer_heads_over_limit) {
2836                sc->gfp_mask |= __GFP_HIGHMEM;
2837                sc->reclaim_idx = gfp_zone(sc->gfp_mask);
2838        }
2839
2840        for_each_zone_zonelist_nodemask(zone, z, zonelist,
2841                                        sc->reclaim_idx, sc->nodemask) {
2842                /*
2843                 * Take care memory controller reclaiming has small influence
2844                 * to global LRU.
2845                 */
2846                if (global_reclaim(sc)) {
2847                        if (!cpuset_zone_allowed(zone,
2848                                                 GFP_KERNEL | __GFP_HARDWALL))
2849                                continue;
2850
2851                        /*
2852                         * If we already have plenty of memory free for
2853                         * compaction in this zone, don't free any more.
2854                         * Even though compaction is invoked for any
2855                         * non-zero order, only frequent costly order
2856                         * reclamation is disruptive enough to become a
2857                         * noticeable problem, like transparent huge
2858                         * page allocations.
2859                         */
2860                        if (IS_ENABLED(CONFIG_COMPACTION) &&
2861                            sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2862                            compaction_ready(zone, sc)) {
2863                                sc->compaction_ready = true;
2864                                continue;
2865                        }
2866
2867                        /*
2868                         * Shrink each node in the zonelist once. If the
2869                         * zonelist is ordered by zone (not the default) then a
2870                         * node may be shrunk multiple times but in that case
2871                         * the user prefers lower zones being preserved.
2872                         */
2873                        if (zone->zone_pgdat == last_pgdat)
2874                                continue;
2875
2876                        /*
2877                         * This steals pages from memory cgroups over softlimit
2878                         * and returns the number of reclaimed pages and
2879                         * scanned pages. This works for global memory pressure
2880                         * and balancing, not for a memcg's limit.
2881                         */
2882                        nr_soft_scanned = 0;
2883                        nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
2884                                                sc->order, sc->gfp_mask,
2885                                                &nr_soft_scanned);
2886                        sc->nr_reclaimed += nr_soft_reclaimed;
2887                        sc->nr_scanned += nr_soft_scanned;
2888                        /* need some check for avoid more shrink_zone() */
2889                }
2890
2891                /* See comment about same check for global reclaim above */
2892                if (zone->zone_pgdat == last_pgdat)
2893                        continue;
2894                last_pgdat = zone->zone_pgdat;
2895                shrink_node(zone->zone_pgdat, sc);
2896        }
2897
2898        /*
2899         * Restore to original mask to avoid the impact on the caller if we
2900         * promoted it to __GFP_HIGHMEM.
2901         */
2902        sc->gfp_mask = orig_mask;
2903}
2904
2905static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
2906{
2907        struct mem_cgroup *memcg;
2908
2909        memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
2910        do {
2911                unsigned long refaults;
2912                struct lruvec *lruvec;
2913
2914                lruvec = mem_cgroup_lruvec(pgdat, memcg);
2915                refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
2916                lruvec->refaults = refaults;
2917        } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
2918}
2919
2920/*
2921 * This is the main entry point to direct page reclaim.
2922 *
2923 * If a full scan of the inactive list fails to free enough memory then we
2924 * are "out of memory" and something needs to be killed.
2925 *
2926 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2927 * high - the zone may be full of dirty or under-writeback pages, which this
2928 * caller can't do much about.  We kick the writeback threads and take explicit
2929 * naps in the hope that some of these pages can be written.  But if the
2930 * allocating task holds filesystem locks which prevent writeout this might not
2931 * work, and the allocation attempt will fail.
2932 *
2933 * returns:     0, if no pages reclaimed
2934 *              else, the number of pages reclaimed
2935 */
2936static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2937                                          struct scan_control *sc)
2938{
2939        int initial_priority = sc->priority;
2940        pg_data_t *last_pgdat;
2941        struct zoneref *z;
2942        struct zone *zone;
2943retry:
2944        delayacct_freepages_start();
2945
2946        if (global_reclaim(sc))
2947                __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
2948
2949        do {
2950                vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2951                                sc->priority);
2952                sc->nr_scanned = 0;
2953                shrink_zones(zonelist, sc);
2954
2955                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2956                        break;
2957
2958                if (sc->compaction_ready)
2959                        break;
2960
2961                /*
2962                 * If we're getting trouble reclaiming, start doing
2963                 * writepage even in laptop mode.
2964                 */
2965                if (sc->priority < DEF_PRIORITY - 2)
2966                        sc->may_writepage = 1;
2967        } while (--sc->priority >= 0);
2968
2969        last_pgdat = NULL;
2970        for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
2971                                        sc->nodemask) {
2972                if (zone->zone_pgdat == last_pgdat)
2973                        continue;
2974                last_pgdat = zone->zone_pgdat;
2975                snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
2976                set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
2977        }
2978
2979        delayacct_freepages_end();
2980
2981        if (sc->nr_reclaimed)
2982                return sc->nr_reclaimed;
2983
2984        /* Aborted reclaim to try compaction? don't OOM, then */
2985        if (sc->compaction_ready)
2986                return 1;
2987
2988        /* Untapped cgroup reserves?  Don't OOM, retry. */
2989        if (sc->memcg_low_skipped) {
2990                sc->priority = initial_priority;
2991                sc->memcg_low_reclaim = 1;
2992                sc->memcg_low_skipped = 0;
2993                goto retry;
2994        }
2995
2996        return 0;
2997}
2998
2999static bool allow_direct_reclaim(pg_data_t *pgdat)
3000{
3001        struct zone *zone;
3002        unsigned long pfmemalloc_reserve = 0;
3003        unsigned long free_pages = 0;
3004        int i;
3005        bool wmark_ok;
3006
3007        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3008                return true;
3009
3010        for (i = 0; i <= ZONE_NORMAL; i++) {
3011                zone = &pgdat->node_zones[i];
3012                if (!managed_zone(zone))
3013                        continue;
3014
3015                if (!zone_reclaimable_pages(zone))
3016                        continue;
3017
3018                pfmemalloc_reserve += min_wmark_pages(zone);
3019                free_pages += zone_page_state(zone, NR_FREE_PAGES);
3020        }
3021
3022        /* If there are no reserves (unexpected config) then do not throttle */
3023        if (!pfmemalloc_reserve)
3024                return true;
3025
3026        wmark_ok = free_pages > pfmemalloc_reserve / 2;
3027
3028        /* kswapd must be awake if processes are being throttled */
3029        if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
3030                pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
3031                                                (enum zone_type)ZONE_NORMAL);
3032                wake_up_interruptible(&pgdat->kswapd_wait);
3033        }
3034
3035        return wmark_ok;
3036}
3037
3038/*
3039 * Throttle direct reclaimers if backing storage is backed by the network
3040 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
3041 * depleted. kswapd will continue to make progress and wake the processes
3042 * when the low watermark is reached.
3043 *
3044 * Returns true if a fatal signal was delivered during throttling. If this
3045 * happens, the page allocator should not consider triggering the OOM killer.
3046 */
3047static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
3048                                        nodemask_t *nodemask)
3049{
3050        struct zoneref *z;
3051        struct zone *zone;
3052        pg_data_t *pgdat = NULL;
3053
3054        /*
3055         * Kernel threads should not be throttled as they may be indirectly
3056         * responsible for cleaning pages necessary for reclaim to make forward
3057         * progress. kjournald for example may enter direct reclaim while
3058         * committing a transaction where throttling it could forcing other
3059         * processes to block on log_wait_commit().
3060         */
3061        if (current->flags & PF_KTHREAD)
3062                goto out;
3063
3064        /*
3065         * If a fatal signal is pending, this process should not throttle.
3066         * It should return quickly so it can exit and free its memory
3067         */
3068        if (fatal_signal_pending(current))
3069                goto out;
3070
3071        /*
3072         * Check if the pfmemalloc reserves are ok by finding the first node
3073         * with a usable ZONE_NORMAL or lower zone. The expectation is that
3074         * GFP_KERNEL will be required for allocating network buffers when
3075         * swapping over the network so ZONE_HIGHMEM is unusable.
3076         *
3077         * Throttling is based on the first usable node and throttled processes
3078         * wait on a queue until kswapd makes progress and wakes them. There
3079         * is an affinity then between processes waking up and where reclaim
3080         * progress has been made assuming the process wakes on the same node.
3081         * More importantly, processes running on remote nodes will not compete
3082         * for remote pfmemalloc reserves and processes on different nodes
3083         * should make reasonable progress.
3084         */
3085        for_each_zone_zonelist_nodemask(zone, z, zonelist,
3086                                        gfp_zone(gfp_mask), nodemask) {
3087                if (zone_idx(zone) > ZONE_NORMAL)
3088                        continue;
3089
3090                /* Throttle based on the first usable node */
3091                pgdat = zone->zone_pgdat;
3092                if (allow_direct_reclaim(pgdat))
3093                        goto out;
3094                break;
3095        }
3096
3097        /* If no zone was usable by the allocation flags then do not throttle */
3098        if (!pgdat)
3099                goto out;
3100
3101        /* Account for the throttling */
3102        count_vm_event(PGSCAN_DIRECT_THROTTLE);
3103
3104        /*
3105         * If the caller cannot enter the filesystem, it's possible that it
3106         * is due to the caller holding an FS lock or performing a journal
3107         * transaction in the case of a filesystem like ext[3|4]. In this case,
3108         * it is not safe to block on pfmemalloc_wait as kswapd could be
3109         * blocked waiting on the same lock. Instead, throttle for up to a
3110         * second before continuing.
3111         */
3112        if (!(gfp_mask & __GFP_FS)) {
3113                wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
3114                        allow_direct_reclaim(pgdat), HZ);
3115
3116                goto check_pending;
3117        }
3118
3119        /* Throttle until kswapd wakes the process */
3120        wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
3121                allow_direct_reclaim(pgdat));
3122
3123check_pending:
3124        if (fatal_signal_pending(current))
3125                return true;
3126
3127out:
3128        return false;
3129}
3130
3131unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3132                                gfp_t gfp_mask, nodemask_t *nodemask)
3133{
3134        unsigned long nr_reclaimed;
3135        struct scan_control sc = {
3136                .nr_to_reclaim = SWAP_CLUSTER_MAX,
3137                .gfp_mask = current_gfp_context(gfp_mask),
3138                .reclaim_idx = gfp_zone(gfp_mask),
3139                .order = order,
3140                .nodemask = nodemask,
3141                .priority = DEF_PRIORITY,
3142                .may_writepage = !laptop_mode,
3143                .may_unmap = 1,
3144                .may_swap = 1,
3145                .may_shrinkslab = 1,
3146        };
3147
3148        /*
3149         * scan_control uses s8 fields for order, priority, and reclaim_idx.
3150         * Confirm they are large enough for max values.
3151         */
3152        BUILD_BUG_ON(MAX_ORDER > S8_MAX);
3153        BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
3154        BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);
3155
3156        /*
3157         * Do not enter reclaim if fatal signal was delivered while throttled.
3158         * 1 is returned so that the page allocator does not OOM kill at this
3159         * point.
3160         */
3161        if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
3162                return 1;
3163
3164        trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
3165
3166        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3167
3168        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
3169
3170        return nr_reclaimed;
3171}
3172
3173#ifdef CONFIG_MEMCG
3174
3175unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3176                                                gfp_t gfp_mask, bool noswap,
3177                                                pg_data_t *pgdat,
3178                                                unsigned long *nr_scanned)
3179{
3180        struct scan_control sc = {
3181                .nr_to_reclaim = SWAP_CLUSTER_MAX,
3182                .target_mem_cgroup = memcg,
3183                .may_writepage = !laptop_mode,
3184                .may_unmap = 1,
3185                .reclaim_idx = MAX_NR_ZONES - 1,
3186                .may_swap = !noswap,
3187                .may_shrinkslab = 1,
3188        };
3189        unsigned long lru_pages;
3190
3191        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3192                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3193
3194        trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
3195                                                      sc.gfp_mask);
3196
3197        /*
3198         * NOTE: Although we can get the priority field, using it
3199         * here is not a good idea, since it limits the pages we can scan.
3200         * if we don't reclaim here, the shrink_node from balance_pgdat
3201         * will pick up pages from other mem cgroup's as well. We hack
3202         * the priority and make it zero.
3203         */
3204        shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
3205
3206        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3207
3208        *nr_scanned = sc.nr_scanned;
3209        return sc.nr_reclaimed;
3210}
3211
3212unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3213                                           unsigned long nr_pages,
3214                                           gfp_t gfp_mask,
3215                                           bool may_swap)
3216{
3217        struct zonelist *zonelist;
3218        unsigned long nr_reclaimed;
3219        unsigned long pflags;
3220        int nid;
3221        unsigned int noreclaim_flag;
3222        struct scan_control sc = {
3223                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
3224                .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
3225                                (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
3226                .reclaim_idx = MAX_NR_ZONES - 1,
3227                .target_mem_cgroup = memcg,
3228                .priority = DEF_PRIORITY,
3229                .may_writepage = !laptop_mode,
3230                .may_unmap = 1,
3231                .may_swap = may_swap,
3232                .may_shrinkslab = 1,
3233        };
3234
3235        /*
3236         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
3237         * take care of from where we get pages. So the node where we start the
3238         * scan does not need to be the current node.
3239         */
3240        nid = mem_cgroup_select_victim_node(memcg);
3241
3242        zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
3243
3244        trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
3245
3246        psi_memstall_enter(&pflags);
3247        noreclaim_flag = memalloc_noreclaim_save();
3248
3249        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3250
3251        memalloc_noreclaim_restore(noreclaim_flag);
3252        psi_memstall_leave(&pflags);
3253
3254        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
3255
3256        return nr_reclaimed;
3257}
3258#endif
3259
3260static void age_active_anon(struct pglist_data *pgdat,
3261                                struct scan_control *sc)
3262{
3263        struct mem_cgroup *memcg;
3264
3265        if (!total_swap_pages)
3266                return;
3267
3268        memcg = mem_cgroup_iter(NULL, NULL, NULL);
3269        do {
3270                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
3271
3272                if (inactive_list_is_low(lruvec, false, sc, true))
3273                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
3274                                           sc, LRU_ACTIVE_ANON);
3275
3276                memcg = mem_cgroup_iter(NULL, memcg, NULL);
3277        } while (memcg);
3278}
3279
3280static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
3281{
3282        int i;
3283        struct zone *zone;
3284
3285        /*
3286         * Check for watermark boosts top-down as the higher zones
3287         * are more likely to be boosted. Both watermarks and boosts
3288         * should not be checked at the time time as reclaim would
3289         * start prematurely when there is no boosting and a lower
3290         * zone is balanced.
3291         */
3292        for (i = classzone_idx; i >= 0; i--) {
3293                zone = pgdat->node_zones + i;
3294                if (!managed_zone(zone))
3295                        continue;
3296
3297                if (zone->watermark_boost)
3298                        return true;
3299        }
3300
3301        return false;
3302}
3303
3304/*
3305 * Returns true if there is an eligible zone balanced for the request order
3306 * and classzone_idx
3307 */
3308static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
3309{
3310        int i;
3311        unsigned long mark = -1;
3312        struct zone *zone;
3313
3314        /*
3315         * Check watermarks bottom-up as lower zones are more likely to
3316         * meet watermarks.
3317         */
3318        for (i = 0; i <= classzone_idx; i++) {
3319                zone = pgdat->node_zones + i;
3320
3321                if (!managed_zone(zone))
3322                        continue;
3323
3324                mark = high_wmark_pages(zone);
3325                if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
3326                        return true;
3327        }
3328
3329        /*
3330         * If a node has no populated zone within classzone_idx, it does not
3331         * need balancing by definition. This can happen if a zone-restricted
3332         * allocation tries to wake a remote kswapd.
3333         */
3334        if (mark == -1)
3335                return true;
3336
3337        return false;
3338}
3339
3340/* Clear pgdat state for congested, dirty or under writeback. */
3341static void clear_pgdat_congested(pg_data_t *pgdat)
3342{
3343        clear_bit(PGDAT_CONGESTED, &pgdat->flags);
3344        clear_bit(PGDAT_DIRTY, &pgdat->flags);
3345        clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
3346}
3347
3348/*
3349 * Prepare kswapd for sleeping. This verifies that there are no processes
3350 * waiting in throttle_direct_reclaim() and that watermarks have been met.
3351 *
3352 * Returns true if kswapd is ready to sleep
3353 */
3354static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3355{
3356        /*
3357         * The throttled processes are normally woken up in balance_pgdat() as
3358         * soon as allow_direct_reclaim() is true. But there is a potential
3359         * race between when kswapd checks the watermarks and a process gets
3360         * throttled. There is also a potential race if processes get
3361         * throttled, kswapd wakes, a large process exits thereby balancing the
3362         * zones, which causes kswapd to exit balance_pgdat() before reaching
3363         * the wake up checks. If kswapd is going to sleep, no process should
3364         * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
3365         * the wake up is premature, processes will wake kswapd and get
3366         * throttled again. The difference from wake ups in balance_pgdat() is
3367         * that here we are under prepare_to_wait().
3368         */
3369        if (waitqueue_active(&pgdat->pfmemalloc_wait))
3370                wake_up_all(&pgdat->pfmemalloc_wait);
3371
3372        /* Hopeless node, leave it to direct reclaim */
3373        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
3374                return true;
3375
3376        if (pgdat_balanced(pgdat, order, classzone_idx)) {
3377                clear_pgdat_congested(pgdat);
3378                return true;
3379        }
3380
3381        return false;
3382}
3383
3384/*
3385 * kswapd shrinks a node of pages that are at or below the highest usable
3386 * zone that is currently unbalanced.
3387 *
3388 * Returns true if kswapd scanned at least the requested number of pages to
3389 * reclaim or if the lack of progress was due to pages under writeback.
3390 * This is used to determine if the scanning priority needs to be raised.
3391 */
3392static bool kswapd_shrink_node(pg_data_t *pgdat,
3393                               struct scan_control *sc)
3394{
3395        struct zone *zone;
3396        int z;
3397
3398        /* Reclaim a number of pages proportional to the number of zones */
3399        sc->nr_to_reclaim = 0;
3400        for (z = 0; z <= sc->reclaim_idx; z++) {
3401                zone = pgdat->node_zones + z;
3402                if (!managed_zone(zone))
3403                        continue;
3404
3405                sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
3406        }
3407
3408        /*
3409         * Historically care was taken to put equal pressure on all zones but
3410         * now pressure is applied based on node LRU order.
3411         */
3412        shrink_node(pgdat, sc);
3413
3414        /*
3415         * Fragmentation may mean that the system cannot be rebalanced for
3416         * high-order allocations. If twice the allocation size has been
3417         * reclaimed then recheck watermarks only at order-0 to prevent
3418         * excessive reclaim. Assume that a process requested a high-order
3419         * can direct reclaim/compact.
3420         */
3421        if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
3422                sc->order = 0;
3423
3424        return sc->nr_scanned >= sc->nr_to_reclaim;
3425}
3426
3427/*
3428 * For kswapd, balance_pgdat() will reclaim pages across a node from zones
3429 * that are eligible for use by the caller until at least one zone is
3430 * balanced.
3431 *
3432 * Returns the order kswapd finished reclaiming at.
3433 *
3434 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
3435 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3436 * found to have free_pages <= high_wmark_pages(zone), any page in that zone
3437 * or lower is eligible for reclaim until at least one usable zone is
3438 * balanced.
3439 */
3440static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
3441{
3442        int i;
3443        unsigned long nr_soft_reclaimed;
3444        unsigned long nr_soft_scanned;
3445        unsigned long pflags;
3446        unsigned long nr_boost_reclaim;
3447        unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };
3448        bool boosted;
3449        struct zone *zone;
3450        struct scan_control sc = {
3451                .gfp_mask = GFP_KERNEL,
3452                .order = order,
3453                .may_unmap = 1,
3454        };
3455
3456        psi_memstall_enter(&pflags);
3457        __fs_reclaim_acquire();
3458
3459        count_vm_event(PAGEOUTRUN);
3460
3461        /*
3462         * Account for the reclaim boost. Note that the zone boost is left in
3463         * place so that parallel allocations that are near the watermark will
3464         * stall or direct reclaim until kswapd is finished.
3465         */
3466        nr_boost_reclaim = 0;
3467        for (i = 0; i <= classzone_idx; i++) {
3468                zone = pgdat->node_zones + i;
3469                if (!managed_zone(zone))
3470                        continue;
3471
3472                nr_boost_reclaim += zone->watermark_boost;
3473                zone_boosts[i] = zone->watermark_boost;
3474        }
3475        boosted = nr_boost_reclaim;
3476
3477restart:
3478        sc.priority = DEF_PRIORITY;
3479        do {
3480                unsigned long nr_reclaimed = sc.nr_reclaimed;
3481                bool raise_priority = true;
3482                bool balanced;
3483                bool ret;
3484
3485                sc.reclaim_idx = classzone_idx;
3486
3487                /*
3488                 * If the number of buffer_heads exceeds the maximum allowed
3489                 * then consider reclaiming from all zones. This has a dual
3490                 * purpose -- on 64-bit systems it is expected that
3491                 * buffer_heads are stripped during active rotation. On 32-bit
3492                 * systems, highmem pages can pin lowmem memory and shrinking
3493                 * buffers can relieve lowmem pressure. Reclaim may still not
3494                 * go ahead if all eligible zones for the original allocation
3495                 * request are balanced to avoid excessive reclaim from kswapd.
3496                 */
3497                if (buffer_heads_over_limit) {
3498                        for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
3499                                zone = pgdat->node_zones + i;
3500                                if (!managed_zone(zone))
3501                                        continue;
3502
3503                                sc.reclaim_idx = i;
3504                                break;
3505                        }
3506                }
3507
3508                /*
3509                 * If the pgdat is imbalanced then ignore boosting and preserve
3510                 * the watermarks for a later time and restart. Note that the
3511                 * zone watermarks will be still reset at the end of balancing
3512                 * on the grounds that the normal reclaim should be enough to
3513                 * re-evaluate if boosting is required when kswapd next wakes.
3514                 */
3515                balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
3516                if (!balanced && nr_boost_reclaim) {
3517                        nr_boost_reclaim = 0;
3518                        goto restart;
3519                }
3520
3521                /*
3522                 * If boosting is not active then only reclaim if there are no
3523                 * eligible zones. Note that sc.reclaim_idx is not used as
3524                 * buffer_heads_over_limit may have adjusted it.
3525                 */
3526                if (!nr_boost_reclaim && balanced)
3527                        goto out;
3528
3529                /* Limit the priority of boosting to avoid reclaim writeback */
3530                if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)
3531                        raise_priority = false;
3532
3533                /*
3534                 * Do not writeback or swap pages for boosted reclaim. The
3535                 * intent is to relieve pressure not issue sub-optimal IO
3536                 * from reclaim context. If no pages are reclaimed, the
3537                 * reclaim will be aborted.
3538                 */
3539                sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3540                sc.may_swap = !nr_boost_reclaim;
3541                sc.may_shrinkslab = !nr_boost_reclaim;
3542
3543                /*
3544                 * Do some background aging of the anon list, to give
3545                 * pages a chance to be referenced before reclaiming. All
3546                 * pages are rotated regardless of classzone as this is
3547                 * about consistent aging.
3548                 */
3549                age_active_anon(pgdat, &sc);
3550
3551                /*
3552                 * If we're getting trouble reclaiming, start doing writepage
3553                 * even in laptop mode.
3554                 */
3555                if (sc.priority < DEF_PRIORITY - 2)
3556                        sc.may_writepage = 1;
3557
3558                /* Call soft limit reclaim before calling shrink_node. */
3559                sc.nr_scanned = 0;
3560                nr_soft_scanned = 0;
3561                nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
3562                                                sc.gfp_mask, &nr_soft_scanned);
3563                sc.nr_reclaimed += nr_soft_reclaimed;
3564
3565                /*
3566                 * There should be no need to raise the scanning priority if
3567                 * enough pages are already being scanned that that high
3568                 * watermark would be met at 100% efficiency.
3569                 */
3570                if (kswapd_shrink_node(pgdat, &sc))
3571                        raise_priority = false;
3572
3573                /*
3574                 * If the low watermark is met there is no need for processes
3575                 * to be throttled on pfmemalloc_wait as they should not be
3576                 * able to safely make forward progress. Wake them
3577                 */
3578                if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3579                                allow_direct_reclaim(pgdat))
3580                        wake_up_all(&pgdat->pfmemalloc_wait);
3581
3582                /* Check if kswapd should be suspending */
3583                __fs_reclaim_release();
3584                ret = try_to_freeze();
3585                __fs_reclaim_acquire();
3586                if (ret || kthread_should_stop())
3587                        break;
3588
3589                /*
3590                 * Raise priority if scanning rate is too low or there was no
3591                 * progress in reclaiming pages
3592                 */
3593                nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
3594                nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);
3595
3596                /*
3597                 * If reclaim made no progress for a boost, stop reclaim as
3598                 * IO cannot be queued and it could be an infinite loop in
3599                 * extreme circumstances.
3600                 */
3601                if (nr_boost_reclaim && !nr_reclaimed)
3602                        break;
3603
3604                if (raise_priority || !nr_reclaimed)
3605                        sc.priority--;
3606        } while (sc.priority >= 1);
3607
3608        if (!sc.nr_reclaimed)
3609                pgdat->kswapd_failures++;
3610
3611out:
3612        /* If reclaim was boosted, account for the reclaim done in this pass */
3613        if (boosted) {
3614                unsigned long flags;
3615
3616                for (i = 0; i <= classzone_idx; i++) {
3617                        if (!zone_boosts[i])
3618                                continue;
3619
3620                        /* Increments are under the zone lock */
3621                        zone = pgdat->node_zones + i;
3622                        spin_lock_irqsave(&zone->lock, flags);
3623                        zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
3624                        spin_unlock_irqrestore(&zone->lock, flags);
3625                }
3626
3627                /*
3628                 * As there is now likely space, wakeup kcompact to defragment
3629                 * pageblocks.
3630                 */
3631                wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
3632        }
3633
3634        snapshot_refaults(NULL, pgdat);
3635        __fs_reclaim_release();
3636        psi_memstall_leave(&pflags);
3637        /*
3638         * Return the order kswapd stopped reclaiming at as
3639         * prepare_kswapd_sleep() takes it into account. If another caller
3640         * entered the allocator slow path while kswapd was awake, order will
3641         * remain at the higher level.
3642         */
3643        return sc.order;
3644}
3645
3646/*
3647 * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
3648 * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
3649 * a valid index then either kswapd runs for first time or kswapd couldn't sleep
3650 * after previous reclaim attempt (node is still unbalanced). In that case
3651 * return the zone index of the previous kswapd reclaim cycle.
3652 */
3653static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
3654                                           enum zone_type prev_classzone_idx)
3655{
3656        if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3657                return prev_classzone_idx;
3658        return pgdat->kswapd_classzone_idx;
3659}
3660
3661static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
3662                                unsigned int classzone_idx)
3663{
3664        long remaining = 0;
3665        DEFINE_WAIT(wait);
3666
3667        if (freezing(current) || kthread_should_stop())
3668                return;
3669
3670        prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3671
3672        /*
3673         * Try to sleep for a short interval. Note that kcompactd will only be
3674         * woken if it is possible to sleep for a short interval. This is
3675         * deliberate on the assumption that if reclaim cannot keep an
3676         * eligible zone balanced that it's also unlikely that compaction will
3677         * succeed.
3678         */
3679        if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3680                /*
3681                 * Compaction records what page blocks it recently failed to
3682                 * isolate pages from and skips them in the future scanning.
3683                 * When kswapd is going to sleep, it is reasonable to assume
3684                 * that pages and compaction may succeed so reset the cache.
3685                 */
3686                reset_isolation_suitable(pgdat);
3687
3688                /*
3689                 * We have freed the memory, now we should compact it to make
3690                 * allocation of the requested order possible.
3691                 */
3692                wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
3693
3694                remaining = schedule_timeout(HZ/10);
3695
3696                /*
3697                 * If woken prematurely then reset kswapd_classzone_idx and
3698                 * order. The values will either be from a wakeup request or
3699                 * the previous request that slept prematurely.
3700                 */
3701                if (remaining) {
3702                        pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3703                        pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
3704                }
3705
3706                finish_wait(&pgdat->kswapd_wait, &wait);
3707                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3708        }
3709
3710        /*
3711         * After a short sleep, check if it was a premature sleep. If not, then
3712         * go fully to sleep until explicitly woken up.
3713         */
3714        if (!remaining &&
3715            prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
3716                trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3717
3718                /*
3719                 * vmstat counters are not perfectly accurate and the estimated
3720                 * value for counters such as NR_FREE_PAGES can deviate from the
3721                 * true value by nr_online_cpus * threshold. To avoid the zone
3722                 * watermarks being breached while under pressure, we reduce the
3723                 * per-cpu vmstat threshold while kswapd is awake and restore
3724                 * them before going back to sleep.
3725                 */
3726                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3727
3728                if (!kthread_should_stop())
3729                        schedule();
3730
3731                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3732        } else {
3733                if (remaining)
3734                        count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3735                else
3736                        count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3737        }
3738        finish_wait(&pgdat->kswapd_wait, &wait);
3739}
3740
3741/*
3742 * The background pageout daemon, started as a kernel thread
3743 * from the init process.
3744 *
3745 * This basically trickles out pages so that we have _some_
3746 * free memory available even if there is no other activity
3747 * that frees anything up. This is needed for things like routing
3748 * etc, where we otherwise might have all activity going on in
3749 * asynchronous contexts that cannot page things out.
3750 *
3751 * If there are applications that are active memory-allocators
3752 * (most normal use), this basically shouldn't matter.
3753 */
3754static int kswapd(void *p)
3755{
3756        unsigned int alloc_order, reclaim_order;
3757        unsigned int classzone_idx = MAX_NR_ZONES - 1;
3758        pg_data_t *pgdat = (pg_data_t*)p;
3759        struct task_struct *tsk = current;
3760
3761        struct reclaim_state reclaim_state = {
3762                .reclaimed_slab = 0,
3763        };
3764        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3765
3766        if (!cpumask_empty(cpumask))
3767                set_cpus_allowed_ptr(tsk, cpumask);
3768        current->reclaim_state = &reclaim_state;
3769
3770        /*
3771         * Tell the memory management that we're a "memory allocator",
3772         * and that if we need more memory we should get access to it
3773         * regardless (see "__alloc_pages()"). "kswapd" should
3774         * never get caught in the normal page freeing logic.
3775         *
3776         * (Kswapd normally doesn't need memory anyway, but sometimes
3777         * you need a small amount of memory in order to be able to
3778         * page out something else, and this flag essentially protects
3779         * us from recursively trying to free more memory as we're
3780         * trying to free the first piece of memory in the first place).
3781         */
3782        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3783        set_freezable();
3784
3785        pgdat->kswapd_order = 0;
3786        pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
3787        for ( ; ; ) {
3788                bool ret;
3789
3790                alloc_order = reclaim_order = pgdat->kswapd_order;
3791                classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3792
3793kswapd_try_sleep:
3794                kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
3795                                        classzone_idx);
3796
3797                /* Read the new order and classzone_idx */
3798                alloc_order = reclaim_order = pgdat->kswapd_order;
3799                classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3800                pgdat->kswapd_order = 0;
3801                pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
3802
3803                ret = try_to_freeze();
3804                if (kthread_should_stop())
3805                        break;
3806
3807                /*
3808                 * We can speed up thawing tasks if we don't call balance_pgdat
3809                 * after returning from the refrigerator
3810                 */
3811                if (ret)
3812                        continue;
3813
3814                /*
3815                 * Reclaim begins at the requested order but if a high-order
3816                 * reclaim fails then kswapd falls back to reclaiming for
3817                 * order-0. If that happens, kswapd will consider sleeping
3818                 * for the order it finished reclaiming at (reclaim_order)
3819                 * but kcompactd is woken to compact for the original
3820                 * request (alloc_order).
3821                 */
3822                trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
3823                                                alloc_order);
3824                reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
3825                if (reclaim_order < alloc_order)
3826                        goto kswapd_try_sleep;
3827        }
3828
3829        tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3830        current->reclaim_state = NULL;
3831
3832        return 0;
3833}
3834
3835/*
3836 * A zone is low on free memory or too fragmented for high-order memory.  If
3837 * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's
3838 * pgdat.  It will wake up kcompactd after reclaiming memory.  If kswapd reclaim
3839 * has failed or is not needed, still wake up kcompactd if only compaction is
3840 * needed.
3841 */
3842void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
3843                   enum zone_type classzone_idx)
3844{
3845        pg_data_t *pgdat;
3846
3847        if (!managed_zone(zone))
3848                return;
3849
3850        if (!cpuset_zone_allowed(zone, gfp_flags))
3851                return;
3852        pgdat = zone->zone_pgdat;
3853
3854        if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3855                pgdat->kswapd_classzone_idx = classzone_idx;
3856        else
3857                pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
3858                                                  classzone_idx);
3859        pgdat->kswapd_order = max(pgdat->kswapd_order, order);
3860        if (!waitqueue_active(&pgdat->kswapd_wait))
3861                return;
3862
3863        /* Hopeless node, leave it to direct reclaim if possible */
3864        if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
3865            (pgdat_balanced(pgdat, order, classzone_idx) &&
3866             !pgdat_watermark_boosted(pgdat, classzone_idx))) {
3867                /*
3868                 * There may be plenty of free memory available, but it's too
3869                 * fragmented for high-order allocations.  Wake up kcompactd
3870                 * and rely on compaction_suitable() to determine if it's
3871                 * needed.  If it fails, it will defer subsequent attempts to
3872                 * ratelimit its work.
3873                 */
3874                if (!(gfp_flags & __GFP_DIRECT_RECLAIM))
3875                        wakeup_kcompactd(pgdat, order, classzone_idx);
3876                return;
3877        }
3878
3879        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
3880                                      gfp_flags);
3881        wake_up_interruptible(&pgdat->kswapd_wait);
3882}
3883
3884#ifdef CONFIG_HIBERNATION
3885/*
3886 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
3887 * freed pages.
3888 *
3889 * Rather than trying to age LRUs the aim is to preserve the overall
3890 * LRU order by reclaiming preferentially
3891 * inactive > active > active referenced > active mapped
3892 */
3893unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3894{
3895        struct reclaim_state reclaim_state;
3896        struct scan_control sc = {
3897                .nr_to_reclaim = nr_to_reclaim,
3898                .gfp_mask = GFP_HIGHUSER_MOVABLE,
3899                .reclaim_idx = MAX_NR_ZONES - 1,
3900                .priority = DEF_PRIORITY,
3901                .may_writepage = 1,
3902                .may_unmap = 1,
3903                .may_swap = 1,
3904                .hibernation_mode = 1,
3905        };
3906        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
3907        struct task_struct *p = current;
3908        unsigned long nr_reclaimed;
3909        unsigned int noreclaim_flag;
3910
3911        fs_reclaim_acquire(sc.gfp_mask);
3912        noreclaim_flag = memalloc_noreclaim_save();
3913        reclaim_state.reclaimed_slab = 0;
3914        p->reclaim_state = &reclaim_state;
3915
3916        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
3917
3918        p->reclaim_state = NULL;
3919        memalloc_noreclaim_restore(noreclaim_flag);
3920        fs_reclaim_release(sc.gfp_mask);
3921
3922        return nr_reclaimed;
3923}
3924#endif /* CONFIG_HIBERNATION */
3925
3926/* It's optimal to keep kswapds on the same CPUs as their memory, but
3927   not required for correctness.  So if the last cpu in a node goes
3928   away, we get changed to run anywhere: as the first one comes back,
3929   restore their cpu bindings. */
3930static int kswapd_cpu_online(unsigned int cpu)
3931{
3932        int nid;
3933
3934        for_each_node_state(nid, N_MEMORY) {
3935                pg_data_t *pgdat = NODE_DATA(nid);
3936                const struct cpumask *mask;
3937
3938                mask = cpumask_of_node(pgdat->node_id);
3939
3940                if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
3941                        /* One of our CPUs online: restore mask */
3942                        set_cpus_allowed_ptr(pgdat->kswapd, mask);
3943        }
3944        return 0;
3945}
3946
3947/*
3948 * This kswapd start function will be called by init and node-hot-add.
3949 * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
3950 */
3951int kswapd_run(int nid)
3952{
3953        pg_data_t *pgdat = NODE_DATA(nid);
3954        int ret = 0;
3955
3956        if (pgdat->kswapd)
3957                return 0;
3958
3959        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3960        if (IS_ERR(pgdat->kswapd)) {
3961                /* failure at boot is fatal */
3962                BUG_ON(system_state < SYSTEM_RUNNING);
3963                pr_err("Failed to start kswapd on node %d\n", nid);
3964                ret = PTR_ERR(pgdat->kswapd);
3965                pgdat->kswapd = NULL;
3966        }
3967        return ret;
3968}
3969
3970/*
3971 * Called by memory hotplug when all memory in a node is offlined.  Caller must
3972 * hold mem_hotplug_begin/end().
3973 */
3974void kswapd_stop(int nid)
3975{
3976        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3977
3978        if (kswapd) {
3979                kthread_stop(kswapd);
3980                NODE_DATA(nid)->kswapd = NULL;
3981        }
3982}
3983
3984static int __init kswapd_init(void)
3985{
3986        int nid, ret;
3987
3988        swap_setup();
3989        for_each_node_state(nid, N_MEMORY)
3990                kswapd_run(nid);
3991        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
3992                                        "mm/vmscan:online", kswapd_cpu_online,
3993                                        NULL);
3994        WARN_ON(ret < 0);
3995        return 0;
3996}
3997
3998module_init(kswapd_init)
3999
4000#ifdef CONFIG_NUMA
4001/*
4002 * Node reclaim mode
4003 *
4004 * If non-zero call node_reclaim when the number of free pages falls below
4005 * the watermarks.
4006 */
4007int node_reclaim_mode __read_mostly;
4008
4009#define RECLAIM_OFF 0
4010#define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
4011#define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
4012#define RECLAIM_UNMAP (1<<2)    /* Unmap pages during reclaim */
4013
4014/*
4015 * Priority for NODE_RECLAIM. This determines the fraction of pages
4016 * of a node considered for each zone_reclaim. 4 scans 1/16th of
4017 * a zone.
4018 */
4019#define NODE_RECLAIM_PRIORITY 4
4020
4021/*
4022 * Percentage of pages in a zone that must be unmapped for node_reclaim to
4023 * occur.
4024 */
4025int sysctl_min_unmapped_ratio = 1;
4026
4027/*
4028 * If the number of slab pages in a zone grows beyond this percentage then
4029 * slab reclaim needs to occur.
4030 */
4031int sysctl_min_slab_ratio = 5;
4032
4033static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
4034{
4035        unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
4036        unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
4037                node_page_state(pgdat, NR_ACTIVE_FILE);
4038
4039        /*
4040         * It's possible for there to be more file mapped pages than
4041         * accounted for by the pages on the file LRU lists because
4042         * tmpfs pages accounted for as ANON can also be FILE_MAPPED
4043         */
4044        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
4045}
4046
4047/* Work out how many page cache pages we can reclaim in this reclaim_mode */
4048static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
4049{
4050        unsigned long nr_pagecache_reclaimable;
4051        unsigned long delta = 0;
4052
4053        /*
4054         * If RECLAIM_UNMAP is set, then all file pages are considered
4055         * potentially reclaimable. Otherwise, we have to worry about
4056         * pages like swapcache and node_unmapped_file_pages() provides
4057         * a better estimate
4058         */
4059        if (node_reclaim_mode & RECLAIM_UNMAP)
4060                nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
4061        else
4062                nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
4063
4064        /* If we can't clean pages, remove dirty pages from consideration */
4065        if (!(node_reclaim_mode & RECLAIM_WRITE))
4066                delta += node_page_state(pgdat, NR_FILE_DIRTY);
4067
4068        /* Watch for any possible underflows due to delta */
4069        if (unlikely(delta > nr_pagecache_reclaimable))
4070                delta = nr_pagecache_reclaimable;
4071
4072        return nr_pagecache_reclaimable - delta;
4073}
4074
4075/*
4076 * Try to free up some pages from this node through reclaim.
4077 */
4078static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4079{
4080        /* Minimum pages needed in order to stay on node */
4081        const unsigned long nr_pages = 1 << order;
4082        struct task_struct *p = current;
4083        struct reclaim_state reclaim_state;
4084        unsigned int noreclaim_flag;
4085        struct scan_control sc = {
4086                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
4087                .gfp_mask = current_gfp_context(gfp_mask),
4088                .order = order,
4089                .priority = NODE_RECLAIM_PRIORITY,
4090                .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
4091                .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
4092                .may_swap = 1,
4093                .reclaim_idx = gfp_zone(gfp_mask),
4094        };
4095
4096        trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
4097                                           sc.gfp_mask);
4098
4099        cond_resched();
4100        fs_reclaim_acquire(sc.gfp_mask);
4101        /*
4102         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
4103         * and we also need to be able to write out pages for RECLAIM_WRITE
4104         * and RECLAIM_UNMAP.
4105         */
4106        noreclaim_flag = memalloc_noreclaim_save();
4107        p->flags |= PF_SWAPWRITE;
4108        reclaim_state.reclaimed_slab = 0;
4109        p->reclaim_state = &reclaim_state;
4110
4111        if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
4112                /*
4113                 * Free memory by calling shrink node with increasing
4114                 * priorities until we have enough memory freed.
4115                 */
4116                do {
4117                        shrink_node(pgdat, &sc);
4118                } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
4119        }
4120
4121        p->reclaim_state = NULL;
4122        current->flags &= ~PF_SWAPWRITE;
4123        memalloc_noreclaim_restore(noreclaim_flag);
4124        fs_reclaim_release(sc.gfp_mask);
4125
4126        trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed);
4127
4128        return sc.nr_reclaimed >= nr_pages;
4129}
4130
4131int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
4132{
4133        int ret;
4134
4135        /*
4136         * Node reclaim reclaims unmapped file backed pages and
4137         * slab pages if we are over the defined limits.
4138         *
4139         * A small portion of unmapped file backed pages is needed for
4140         * file I/O otherwise pages read by file I/O will be immediately
4141         * thrown out if the node is overallocated. So we do not reclaim
4142         * if less than a specified percentage of the node is used by
4143         * unmapped file backed pages.
4144         */
4145        if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
4146            node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
4147                return NODE_RECLAIM_FULL;
4148
4149        /*
4150         * Do not scan if the allocation should not be delayed.
4151         */
4152        if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
4153                return NODE_RECLAIM_NOSCAN;
4154
4155        /*
4156         * Only run node reclaim on the local node or on nodes that do not
4157         * have associated processors. This will favor the local processor
4158         * over remote processors and spread off node memory allocations
4159         * as wide as possible.
4160         */
4161        if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
4162                return NODE_RECLAIM_NOSCAN;
4163
4164        if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
4165                return NODE_RECLAIM_NOSCAN;
4166
4167        ret = __node_reclaim(pgdat, gfp_mask, order);
4168        clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
4169
4170        if (!ret)
4171                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
4172
4173        return ret;
4174}
4175#endif
4176
4177/*
4178 * page_evictable - test whether a page is evictable
4179 * @page: the page to test
4180 *
4181 * Test whether page is evictable--i.e., should be placed on active/inactive
4182 * lists vs unevictable list.
4183 *
4184 * Reasons page might not be evictable:
4185 * (1) page's mapping marked unevictable
4186 * (2) page is part of an mlocked VMA
4187 *
4188 */
4189int page_evictable(struct page *page)
4190{
4191        int ret;
4192
4193        /* Prevent address_space of inode and swap cache from being freed */
4194        rcu_read_lock();
4195        ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
4196        rcu_read_unlock();
4197        return ret;
4198}
4199
4200/**
4201 * check_move_unevictable_pages - check pages for evictability and move to
4202 * appropriate zone lru list
4203 * @pvec: pagevec with lru pages to check
4204 *
4205 * Checks pages for evictability, if an evictable page is in the unevictable
4206 * lru list, moves it to the appropriate evictable lru list. This function
4207 * should be only used for lru pages.
4208 */
4209void check_move_unevictable_pages(struct pagevec *pvec)
4210{
4211        struct lruvec *lruvec;
4212        struct pglist_data *pgdat = NULL;
4213        int pgscanned = 0;
4214        int pgrescued = 0;
4215        int i;
4216
4217        for (i = 0; i < pvec->nr; i++) {
4218                struct page *page = pvec->pages[i];
4219                struct pglist_data *pagepgdat = page_pgdat(page);
4220
4221                pgscanned++;
4222                if (pagepgdat != pgdat) {
4223                        if (pgdat)
4224                                spin_unlock_irq(&pgdat->lru_lock);
4225                        pgdat = pagepgdat;
4226                        spin_lock_irq(&pgdat->lru_lock);
4227                }
4228                lruvec = mem_cgroup_page_lruvec(page, pgdat);
4229
4230                if (!PageLRU(page) || !PageUnevictable(page))
4231                        continue;
4232
4233                if (page_evictable(page)) {
4234                        enum lru_list lru = page_lru_base_type(page);
4235
4236                        VM_BUG_ON_PAGE(PageActive(page), page);
4237                        ClearPageUnevictable(page);
4238                        del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
4239                        add_page_to_lru_list(page, lruvec, lru);
4240                        pgrescued++;
4241                }
4242        }
4243
4244        if (pgdat) {
4245                __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
4246                __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
4247                spin_unlock_irq(&pgdat->lru_lock);
4248        }
4249}
4250EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
4251