linux/mm/workingset.c
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 *              Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list.  Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail.  Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *              Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory.  In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages.  But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot.  This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache).  If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead.  And on a full system,
 * the only thing eating into inactive list space is active pages.
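 *
 * An illustrative example with made-up numbers: say NR_inactive is
 * 100 and NR_active is 50.  A page evicted at E = 1000 that refaults
 * at R = 1030 has a refault distance of 30; its minimum access
 * distance of 100 + 30 = 130 fits within the 150 pages of cache, so
 * the page is a candidate for activation.  Had it refaulted at
 * R = 1080 instead, the distance of 80 would exceed NR_active, and
 * even a cache that was 100% inactive space could not have held the
 * page between its accesses.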
 *
 *
 *              Activating refaulting pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past.  This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *              Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (node->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache radix tree
 * slot of the evicted page.  This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT  (RADIX_TREE_EXCEPTIONAL_ENTRY + \
                         NODES_SHIFT + \
                         MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK   (~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the radix tree
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

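/*
 * Shadow entry layout, from most to least significant bits: the
 * bucketed eviction timestamp, the memcg ID, the node ID, and
 * finally the radix tree exception bits that distinguish the entry
 * from a regular page pointer.  unpack_shadow() undoes these steps
 * in the opposite order.
 */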
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
{
        eviction >>= bucket_order;
        eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
        eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);

        return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
                          unsigned long *evictionp)
{
        unsigned long entry = (unsigned long)shadow;
        int memcgid, nid;

        entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
        entry >>= MEM_CGROUP_ID_SHIFT;

        *memcgidp = memcgid;
        *pgdat = NODE_DATA(nid);
        *evictionp = entry << bucket_order;
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->page_tree in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
        struct mem_cgroup *memcg = page_memcg(page);
        struct pglist_data *pgdat = page_pgdat(page);
        int memcgid = mem_cgroup_id(memcg);
        unsigned long eviction;
        struct lruvec *lruvec;

        /* Page is fully exclusive and pins page->mem_cgroup */
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        eviction = atomic_long_inc_return(&lruvec->inactive_age);
        return pack_shadow(memcgid, pgdat, eviction);
}
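
/*
 * Sketch of the reclaim-side usage (illustrative, not the verbatim
 * call site): when reclaim removes a file page from the page cache,
 * the returned shadow entry takes the page's place in the radix tree
 * slot, along the lines of:
 *
 *      shadow = workingset_eviction(mapping, page);
 *      __delete_from_page_cache(page, shadow);
 */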

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 *
 * Returns %true if the page should be activated, %false otherwise.
 */
bool workingset_refault(void *shadow)
{
        unsigned long refault_distance;
        unsigned long active_file;
        struct mem_cgroup *memcg;
        unsigned long eviction;
        struct lruvec *lruvec;
        unsigned long refault;
        struct pglist_data *pgdat;
        int memcgid;

        unpack_shadow(shadow, &memcgid, &pgdat, &eviction);

        rcu_read_lock();
        /*
         * Look up the memcg associated with the stored ID. It might
         * have been deleted since the page's eviction.
         *
         * Note that in rare events the ID could have been recycled
         * for a new cgroup that refaults a shared page. This is
         * impossible to tell from the available data. However, this
         * should be a rare and limited disturbance, and activations
         * are always speculative anyway. Ultimately, it's the aging
         * algorithm's job to shake out the minimum access frequency
         * for the active cache.
         *
         * XXX: On !CONFIG_MEMCG, this will always return NULL; it
         * would be better if the root_mem_cgroup existed in all
         * configurations instead.
         */
        memcg = mem_cgroup_from_id(memcgid);
        if (!mem_cgroup_disabled() && !memcg) {
                rcu_read_unlock();
                return false;
        }
        lruvec = mem_cgroup_lruvec(pgdat, memcg);
        refault = atomic_long_read(&lruvec->inactive_age);
        active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
        rcu_read_unlock();

        /*
         * The unsigned subtraction here gives an accurate distance
         * across inactive_age overflows in most cases.
         *
         * There is a special case: usually, shadow entries have a
         * short lifetime and are either refaulted or reclaimed along
         * with the inode before they get too old.  But it is not
         * impossible for the inactive_age to lap a shadow entry in
         * the field, which can then result in a false small
         * refault distance, leading to a false activation should this
         * old entry actually refault again.  However, earlier kernels
         * used to deactivate unconditionally with *every* reclaim
         * invocation for the longest time, so the occasional
         * inappropriate activation leading to pressure on the active
         * list is not a problem.
         */
        refault_distance = (refault - eviction) & EVICTION_MASK;

        inc_node_state(pgdat, WORKINGSET_REFAULT);

        if (refault_distance <= active_file) {
                inc_node_state(pgdat, WORKINGSET_ACTIVATE);
                return true;
        }
        return false;
}
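
/*
 * Sketch of the fault-side usage (illustrative, not the verbatim
 * call site): a cache miss that finds a shadow entry in the slot it
 * is inserting into consults workingset_refault() before putting the
 * new page on the LRU, along the lines of:
 *
 *      if (shadow && workingset_refault(shadow)) {
 *              SetPageActive(page);
 *              workingset_activation(page);
 *      }
 *      lru_cache_add(page);
 */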

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        /*
         * Filter non-memcg pages here, e.g. unmap can call
         * mark_page_accessed() on VDSO pages.
         *
         * XXX: See workingset_refault() - this should return
         * root_mem_cgroup even for !CONFIG_MEMCG.
         */
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
        lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
        atomic_long_inc(&lruvec->inactive_age);
out:
        rcu_read_unlock();
}
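
/*
 * Illustrative call site (a sketch, not verbatim): mark_page_accessed()
 * is expected to invoke this for file pages it promotes to the active
 * list, so that promotions age the inactive list the same way that
 * evictions do.
 */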

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload.  In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes.  To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

struct list_lru workingset_shadow_nodes;

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
                                        struct shrink_control *sc)
{
        unsigned long shadow_nodes;
        unsigned long max_nodes;
        unsigned long pages;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
        local_irq_enable();

        if (sc->memcg) {
                pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
                                                     LRU_ALL_FILE);
        } else {
                pages = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
                        node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
        }

        /*
         * Active cache pages are limited to 50% of memory, and shadow
         * entries that represent a refault distance bigger than that
         * do not have any effect.  Limit the number of shadow nodes
         * such that shadow entries do not exceed the number of active
         * cache pages, assuming a worst-case node population density
         * of 1/8th on average.
         *
         * On 64-bit with 7 radix_tree_nodes per page and 64 slots
         * each, this will reclaim shadow entries when they consume
         * ~2% of available memory:
         *
         * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
         */
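        /*
         * Worked example (illustrative, assuming a 64-bit build with
         * RADIX_TREE_MAP_SHIFT == 6): the shift below is
         * 1 + 6 - 3 == 4, so max_nodes is pages / 16.  At the assumed
         * density of 64 / 8 == 8 shadow entries per node, that allows
         * up to pages / 2 entries - one per possible active page.
         */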
        max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

        if (shadow_nodes <= max_nodes)
                return 0;

        return shadow_nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
                                          spinlock_t *lru_lock,
                                          void *arg)
{
        struct address_space *mapping;
        struct radix_tree_node *node;
        unsigned int i;
        int ret;

        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the mapping->tree_lock and the
         * lru_lock.  Because the page cache tree is emptied before
         * the inode can be destroyed, holding the lru_lock pins any
         * address_space that has radix tree nodes on the LRU.
         *
         * We can then safely transition to the mapping->tree_lock to
         * pin only the address_space of the particular node we want
         * to reclaim, take the node off-LRU, and drop the lru_lock.
         */

        node = container_of(item, struct radix_tree_node, private_list);
        mapping = node->private_data;

        /* Coming from the list, invert the lock order */
        if (!spin_trylock(&mapping->tree_lock)) {
                spin_unlock(lru_lock);
                ret = LRU_RETRY;
                goto out;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lru_lock);

        /*
         * The nodes should only contain one or more shadow entries,
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
        BUG_ON(!workingset_node_shadows(node));
        BUG_ON(workingset_node_pages(node));

        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[i]) {
                        BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
                        node->slots[i] = NULL;
                        workingset_node_shadows_dec(node);
                        BUG_ON(!mapping->nrexceptional);
                        mapping->nrexceptional--;
                }
        }
        BUG_ON(workingset_node_shadows(node));
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
        if (!__radix_tree_delete_node(&mapping->page_tree, node))
                BUG();

        spin_unlock(&mapping->tree_lock);
        ret = LRU_REMOVED_RETRY;
out:
        local_irq_enable();
        cond_resched();
        local_irq_disable();
        spin_lock(lru_lock);
        return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
                                       struct shrink_control *sc)
{
        unsigned long ret;

        /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
        local_irq_disable();
        ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
                                   shadow_lru_isolate, NULL);
        local_irq_enable();
        return ret;
}

static struct shrinker workingset_shadow_shrinker = {
        .count_objects = count_shadow_nodes,
        .scan_objects = scan_shadow_nodes,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * mapping->tree_lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
        unsigned int timestamp_bits;
        unsigned int max_order;
        int ret;

        BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
        /*
         * Calculate the eviction bucket size to cover the longest
         * actionable refault distance, which is currently half of
         * memory (totalram_pages/2). However, memory hotplug may add
         * some more pages at runtime, so keep working with up to
         * double the initial memory by using totalram_pages as-is.
         */
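        /*
         * Illustrative numbers (config-dependent): with two radix
         * tree exception bits, NODES_SHIFT == 10 and
         * MEM_CGROUP_ID_SHIFT == 16, EVICTION_SHIFT is 28, leaving
         * 36 timestamp bits on 64-bit.  Any machine with fewer than
         * 2^36 pages (256TB at a 4K page size) then has
         * max_order <= timestamp_bits and keeps bucket_order at 0.
         */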
        timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
                timestamp_bits, max_order, bucket_order);

        ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
        if (ret)
                goto err;
        ret = register_shrinker(&workingset_shadow_shrinker);
        if (ret)
                goto err_list_lru;
        return 0;
err_list_lru:
        list_lru_destroy(&workingset_shadow_nodes);
err:
        return ret;
}
module_init(workingset_init);