linux/include/linux/list_lru.h
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
        LRU_REMOVED,            /* item removed from list */
        LRU_REMOVED_RETRY,      /* item removed, but lock has been
                                   dropped and reacquired */
        LRU_ROTATE,             /* item referenced, give another pass */
        LRU_SKIP,               /* item cannot be locked, skip */
        LRU_RETRY,              /* item not freeable. May drop the lock
                                   internally, but has to return locked. */
};
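
/*
 * A minimal illustrative sketch (not part of the original header) of a
 * walk callback and how each status is typically used. The object type
 * "struct my_object", its embedded "lru" list_head, and the helpers
 * my_object_is_busy()/my_object_was_referenced() are hypothetical names,
 * not kernel APIs:
 *
 *      static enum lru_status my_isolate(struct list_head *item,
 *                                        spinlock_t *lock, void *cb_arg)
 *      {
 *              struct my_object *obj =
 *                      container_of(item, struct my_object, lru);
 *
 *              if (my_object_is_busy(obj))
 *                      return LRU_SKIP;
 *              if (my_object_was_referenced(obj))
 *                      return LRU_ROTATE;
 *              list_del_init(item);
 *              return LRU_REMOVED;
 *      }
 *
 * On LRU_REMOVED the callback itself takes the item off the list (here
 * via list_del_init()); on LRU_ROTATE the infrastructure moves the item
 * to the list tail on the callback's behalf.
 */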

struct list_lru_node {
        spinlock_t              lock;
        struct list_head        list;
        /* kept as signed so we can catch imbalance bugs */
        long                    nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
        struct list_lru_node    *node;
        nodemask_t              active_nodes;
};

void list_lru_destroy(struct list_lru *lru);
int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key);
static inline int list_lru_init(struct list_lru *lru)
{
        return list_lru_init_key(lru, NULL);
}
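
/*
 * A minimal illustrative sketch (not part of the original header) of the
 * init/destroy lifecycle. "my_cache_lru" and the init/exit hooks are
 * hypothetical names:
 *
 *      static struct list_lru my_cache_lru;
 *
 *      static int __init my_cache_init(void)
 *      {
 *              return list_lru_init(&my_cache_lru);
 *      }
 *
 *      static void my_cache_exit(void)
 *      {
 *              list_lru_destroy(&my_cache_lru);
 *      }
 *
 * list_lru_init_key() can be used instead when a separate lockdep class
 * is wanted for the per-node spinlocks.
 */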

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs in the list and is allowed to
 * update it lazily. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item
 * from the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
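
/*
 * A minimal illustrative sketch (not part of the original header) of an
 * object moving on and off the LRU. "struct my_object" and "my_cache_lru"
 * are hypothetical names; the embedded list_head must be initialized
 * (e.g. with INIT_LIST_HEAD()) before first use:
 *
 *      struct my_object {
 *              struct list_head lru;
 *      };
 *
 *      static void my_object_park(struct my_object *obj)
 *      {
 *              list_lru_add(&my_cache_lru, &obj->lru);
 *      }
 *
 *      static bool my_object_grab(struct my_object *obj)
 *      {
 *              return list_lru_del(&my_cache_lru, &obj->lru);
 *      }
 */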

/**
 * list_lru_count_node: return the number of objects currently held by @lru
 *  on node @nid
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_count(struct list_lru *lru)
{
        long count = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes)
                count += list_lru_count_node(lru, nid);

        return count;
}
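
/*
 * A minimal illustrative sketch (not part of the original header): a
 * shrinker-style "countable objects" query backed by the LRU, with
 * "my_cache_lru" again a hypothetical global:
 *
 *      static unsigned long my_cache_count(void)
 *      {
 *              return list_lru_count(&my_cache_lru);
 *      }
 */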

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
              void *cb_arg, unsigned long nr_to_walk)
{
        long isolated = 0;
        int nid;

        for_each_node_mask(nid, lru->active_nodes) {
                isolated += list_lru_walk_node(lru, nid, isolate,
                                               cb_arg, &nr_to_walk);
                if (nr_to_walk == 0)
                        break;
        }
        return isolated;
}
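
/*
 * A minimal illustrative sketch (not part of the original header): driving
 * a whole scan with the my_isolate() callback sketched near enum lru_status
 * above. "my_cache_lru" is a hypothetical global and 128 an arbitrary scan
 * budget:
 *
 *      static unsigned long my_cache_shrink(void)
 *      {
 *              return list_lru_walk(&my_cache_lru, my_isolate, NULL, 128);
 *      }
 */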
#endif /* _LRU_LIST_H */