linux/mm/workingset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *              Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *              Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
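 *
 * A worked example with illustrative numbers: say NR_inactive is
 * 1000 and NR_active is 800.  A page that is evicted when the
 * eviction/activation counter reads E = 5000 and refaults when it
 * reads R = 5300 has a refault distance of 300.  Since 300 is less
 * than NR_active, the page could have remained in cache had those
 * 300 slots not been occupied by active pages, so it is a candidate
 * for optimistic activation on refault.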
 *
 *
 *              Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *              Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *              Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT  ((BITS_PER_LONG - BITS_PER_XA_VALUE) +  \
                         1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK   (~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

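/*
 * Layout of a packed shadow entry, as produced by pack_shadow() below
 * (most significant bits first): the bucketed eviction timestamp,
 * MEM_CGROUP_ID_SHIFT bits of memcg ID, NODES_SHIFT bits of node ID,
 * and one PageWorkingset bit.  The result is tagged as an xarray
 * value entry, which is where the (BITS_PER_LONG - BITS_PER_XA_VALUE)
 * term of EVICTION_SHIFT comes from.  For example, on 64-bit with
 * NODES_SHIFT == 6 and MEM_CGROUP_ID_SHIFT == 16, the identifier
 * takes 1 + 1 + 6 + 16 = 24 bits, leaving 40 bits for the timestamp.
 */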
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
                         bool workingset)
{
        eviction >>= bucket_order;
        eviction &= EVICTION_MASK;
        eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
        eviction = (eviction << 1) | workingset;

        return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
                          unsigned long *evictionp, bool *workingsetp)
{
        unsigned long entry = xa_to_value(shadow);
        int memcgid, nid;
        bool workingset;

        workingset = entry & 1;
        entry >>= 1;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
        entry >>= MEM_CGROUP_ID_SHIFT;

        *memcgidp = memcgid;
        *pgdat = NODE_DATA(nid);
        *evictionp = entry << bucket_order;
        *workingsetp = workingset;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
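 *
 * The caller (typically the reclaim code that is removing @page from
 * its mapping) is responsible for storing the returned entry in the
 * page cache slot that @page used to occupy.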
 */
void *workingset_eviction(struct page *page)
{
        struct pglist_data *pgdat = page_pgdat(page);
        struct mem_cgroup *memcg = page_memcg(page);
        int memcgid = mem_cgroup_id(memcg);
        unsigned long eviction;
        struct lruvec *lruvec;

        /* Page is fully exclusive and pins page->mem_cgroup */
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        eviction = atomic_long_inc_return(&lruvec->inactive_age);
        return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
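 *
 * This is typically called from the page cache insertion path when
 * the slot being refilled still holds the shadow entry left behind
 * by workingset_eviction().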
 */
void workingset_refault(struct page *page, void *shadow)
{
        unsigned long refault_distance;
        struct pglist_data *pgdat;
        unsigned long active_file;
        struct mem_cgroup *memcg;
        unsigned long eviction;
        struct lruvec *lruvec;
        unsigned long refault;
        bool workingset;
        int memcgid;

        unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

        rcu_read_lock();
        /*
         * Look up the memcg associated with the stored ID. It might
         * have been deleted since the page's eviction.
         *
         * Note that in rare events the ID could have been recycled
         * for a new cgroup that refaults a shared page. This is
         * impossible to tell from the available data. However, this
         * should be a rare and limited disturbance, and activations
         * are always speculative anyway. Ultimately, it's the aging
         * algorithm's job to shake out the minimum access frequency
         * for the active cache.
         *
         * XXX: On !CONFIG_MEMCG, this will always return NULL; it
         * would be better if the root_mem_cgroup existed in all
         * configurations instead.
         */
        memcg = mem_cgroup_from_id(memcgid);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        refault = atomic_long_read(&lruvec->inactive_age);
        active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);

        /*
         * Calculate the refault distance
         *
         * The unsigned subtraction here gives an accurate distance
         * across inactive_age overflows in most cases. There is a
         * special case: usually, shadow entries have a short lifetime
         * and are either refaulted or reclaimed along with the inode
         * before they get too old.  But it is not impossible for the
         * inactive_age to lap a shadow entry in the field, which can
         * then result in a false small refault distance, leading to a
         * false activation should this old entry actually refault
         * again.  However, earlier kernels used to deactivate
         * unconditionally with *every* reclaim invocation for the
         * longest time, so the occasional inappropriate activation
         * leading to pressure on the active list is not a problem.
         */
        refault_distance = (refault - eviction) & EVICTION_MASK;

        inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

        /*
         * Compare the distance to the existing workingset size. We
         * don't act on pages that couldn't stay resident even if all
         * the memory was available to the page cache.
         */
        if (refault_distance > active_file)
                goto out;

        SetPageActive(page);
        atomic_long_inc(&lruvec->inactive_age);
        inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

        /* Page was active prior to eviction */
        if (workingset) {
                SetPageWorkingset(page);
                inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
        }
out:
        rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
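 *
 * Typically called from mark_page_accessed() when a page is moved
 * from the inactive to the active list.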
 */
void workingset_activation(struct page *page)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        /*
         * Filter non-memcg pages here, e.g. unmap can call
         * mark_page_accessed() on VDSO pages.
         *
         * XXX: See workingset_refault() - this should return
         * root_mem_cgroup even for !CONFIG_MEMCG.
         */
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
        atomic_long_inc(&lruvec->inactive_age);
out:
        rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

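/*
 * workingset_update_node() is hooked up as the xarray node update
 * callback via xas_set_update() (see shadow_lru_isolate() below and
 * the page cache insertion/deletion paths), so it runs whenever a
 * node of a mapping's i_pages tree gains or loses entries.
 */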
void workingset_update_node(struct xa_node *node)
{
        /*
         * Track non-empty nodes that contain only shadow entries;
         * unlink those that contain pages or are being freed.
         *
         * Avoid acquiring the list_lru lock when the nodes are
         * already where they should be. The list_empty() test is safe
         * as node->private_list is protected by the i_pages lock.
         */
        VM_WARN_ON_ONCE(!irqs_disabled());  /* For __inc_lruvec_slab_state */

        if (node->count && node->count == node->nr_values) {
                if (list_empty(&node->private_list)) {
                        list_lru_add(&shadow_nodes, &node->private_list);
                        __inc_lruvec_slab_state(node, WORKINGSET_NODES);
                }
        } else {
                if (!list_empty(&node->private_list)) {
                        list_lru_del(&shadow_nodes, &node->private_list);
                        __dec_lruvec_slab_state(node, WORKINGSET_NODES);
                }
        }
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        unsigned long max_nodes;
        unsigned long nodes;
        unsigned long pages;

        nodes = list_lru_shrink_count(&shadow_nodes, sc);

        /*
         * Approximate a reasonable limit for the nodes
         * containing shadow entries. We don't need to keep more
         * shadow entries than possible pages on the active list,
         * since refault distances bigger than that are dismissed.
         *
         * The size of the active list converges toward 100% of
         * overall page cache as memory grows, with only a tiny
         * inactive list. Assume the total cache size for that.
         *
         * Nodes might be sparsely populated, with only one shadow
         * entry in the extreme case. Obviously, we cannot keep one
         * node for every eligible shadow entry, so compromise on a
         * worst-case density of 1/8th. Below that, not all eligible
         * refaults can be detected anymore.
         *
         * On 64-bit with 7 xa_nodes per page and 64 slots
         * each, this will reclaim shadow entries when they consume
         * ~1.8% of available memory:
         *
         * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
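         *
         * With XA_CHUNK_SHIFT == 6, max_nodes below works out to
         * pages >> 3, i.e. one shadow node is allowed per 8 pages of
         * memory before the surplus is handed to the shrinker.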
         */
#ifdef CONFIG_MEMCG
        if (sc->memcg) {
                struct lruvec *lruvec;
                int i;

                lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
                for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
                        pages += lruvec_page_state_local(lruvec,
                                                         NR_LRU_BASE + i);
                pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE);
                pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE);
        } else
#endif
                pages = node_present_pages(sc->nid);

        max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

        if (!nodes)
                return SHRINK_EMPTY;

        if (nodes <= max_nodes)
                return 0;
        return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
                                          spinlock_t *lru_lock,
                                          void *arg) __must_hold(lru_lock)
{
        struct xa_node *node = container_of(item, struct xa_node, private_list);
        XA_STATE(xas, node->array, 0);
        struct address_space *mapping;
        int ret;

        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the i_pages lock and the
         * lru_lock.  Because the page cache tree is emptied before
         * the inode can be destroyed, holding the lru_lock pins any
         * address_space that has nodes on the LRU.
         *
         * We can then safely transition to the i_pages lock to
         * pin only the address_space of the particular node we want
         * to reclaim, take the node off-LRU, and drop the lru_lock.
         */

        mapping = container_of(node->array, struct address_space, i_pages);

        /* Coming from the list, invert the lock order */
        if (!xa_trylock(&mapping->i_pages)) {
                spin_unlock_irq(lru_lock);
                ret = LRU_RETRY;
                goto out;
        }

        list_lru_isolate(lru, item);
        __dec_lruvec_slab_state(node, WORKINGSET_NODES);

        spin_unlock(lru_lock);

        /*
         * The nodes should only contain one or more shadow entries,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
        if (WARN_ON_ONCE(!node->nr_values))
                goto out_invalid;
        if (WARN_ON_ONCE(node->count != node->nr_values))
                goto out_invalid;
        mapping->nrexceptional -= node->nr_values;
        xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
        xas.xa_offset = node->offset;
        xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
        xas_set_update(&xas, workingset_update_node);
        /*
         * We could store a shadow entry here which was the minimum of the
         * shadow entries we were tracking ...
         */
        xas_store(&xas, NULL);
        __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
        xa_unlock_irq(&mapping->i_pages);
        ret = LRU_REMOVED_RETRY;
out:
        cond_resched();
        spin_lock_irq(lru_lock);
        return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
                                       struct shrink_control *sc)
{
        /* list_lru lock nests inside the IRQ-safe i_pages lock */
        return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
                                        NULL);
}

static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = 0, /* ->count reports only fully expendable nodes */
        .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
        unsigned int timestamp_bits;
        unsigned int max_order;
        int ret;

        BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
        /*
         * Calculate the eviction bucket size to cover the longest
         * actionable refault distance, which is currently half of
         * memory (totalram_pages/2). However, memory hotplug may add
         * some more pages at runtime, so keep working with up to
         * double the initial memory by using totalram_pages as-is.
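         *
         * For example, with 40 timestamp bits, a 64-bit machine would
         * need more than 2^40 pages (4 petabytes at 4K pages) before
         * max_order exceeds timestamp_bits and bucket_order grows
         * beyond zero.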
         */
        timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
        max_order = fls_long(totalram_pages() - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
               timestamp_bits, max_order, bucket_order);

        ret = prealloc_shrinker(&workingset_shadow_shrinker);
        if (ret)
                goto err;
        ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
                              &workingset_shadow_shrinker);
        if (ret)
                goto err_list_lru;
        register_shrinker_prepared(&workingset_shadow_shrinker);
        return 0;
err_list_lru:
        free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
        return ret;
}
module_init(workingset_init);