linux/fs/mbcache.c
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use a hash of the block's contents as a key and the block number as a
 * value. That's why keys need not be unique (different xattr blocks may end up
 * having the same hash). However, a block number always uniquely identifies a
 * cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A fixed
 * size hash table is used for fast key lookups.
 */
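
/*
 * A rough sketch of how ext2/ext4 drive this API (names like "hash" and "bh"
 * below are placeholders for caller-side state, not part of this file):
 *
 *	- when writing out an xattr block:
 *		mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
 *				      reusable);
 *	- when looking for an existing block to share:
 *		mb_cache_entry_find_first() / mb_cache_entry_find_next();
 *	- when freeing or modifying an xattr block:
 *		mb_cache_entry_delete_block(cache, hash, bh->b_blocknr);
 */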

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	int			c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};
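
/*
 * The entries themselves (struct mb_cache_entry) are defined in
 * <linux/mbcache.h>. The fields used below are, roughly:
 *
 *	struct list_head	e_list;		// LRU list, under c_list_lock
 *	struct hlist_bl_node	e_hash_list;	// hash chain, under chain bitlock
 *	atomic_t		e_refcnt;
 *	u32			e_key;		// stable for entry lifetime
 *	u32			e_referenced:1;	// "used recently" bit
 *	u32			e_reusable:1;	// may other inodes reuse e_block?
 *	sector_t		e_block;	// stable for entry lifetime
 *
 * This is only a sketch; see the header for the authoritative definition.
 */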

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @block - block that contains data
 * @reusable - is the block reusable by other inodes?
 *
 * Creates an entry in @cache with key @key and records that data is stored in
 * block @block. The function returns -EBUSY if an entry with the same key and
 * block already exists in the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for hash, one ref returned */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	entry->e_reusable = reusable;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
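
/*
 * A minimal sketch of how a caller might use this ("hash" and "block" are
 * placeholders for caller-side values):
 *
 *	error = mb_cache_entry_create(cache, GFP_NOFS, hash, block, true);
 *	if (error == -EBUSY)
 *		error = 0;	// key-block pair already cached, nothing to do
 *
 * Callers such as ext4 typically treat -EBUSY as success, since the pair is
 * already present in the cache.
 */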

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * Walk the hash chain for @key, starting either at its head or just after
 * @entry, and return the first reusable entry found (with its refcount
 * elevated). The reference held on @entry, if any, is dropped.
 */
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find the next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
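
/*
 * The two functions above are meant to be used as an iteration pair. A sketch
 * of a lookup loop over all reusable entries with a given key ("hash" and
 * try_to_reuse() stand in for caller-side logic):
 *
 *	struct mb_cache_entry *entry;
 *
 *	entry = mb_cache_entry_find_first(cache, hash);
 *	while (entry) {
 *		if (try_to_reuse(entry->e_block)) {
 *			mb_cache_entry_touch(cache, entry);
 *			mb_cache_entry_put(cache, entry);
 *			break;
 *		}
 *		entry = mb_cache_entry_find_next(cache, entry);
 *	}
 *
 * Since find_next() drops the reference to the entry passed in, the caller
 * only needs an explicit mb_cache_entry_put() for the entry it stops at.
 */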

/*
 * mb_cache_entry_get - get a cache entry by block number (and key)
 * @cache - cache we work with
 * @key - key of block number @block
 * @block - block number
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete_block - remove information about block from cache
 * @cache - cache we work with
 * @key - key of block @block
 * @block - block number
 *
 * Remove entry from cache @cache with key @key with data stored in @block.
 */
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);
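
/*
 * Callers typically remove the entry when the block it describes is freed or
 * about to be modified, e.g. (sketch; "hash" and "bh" are placeholders):
 *
 *	mb_cache_entry_delete_block(cache, hash, bh->b_blocknr);
 *
 * If no matching entry exists, the call is simply a no-op.
 */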

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/*
 * Shrink number of entries in cache. Entries that have been used since the
 * last scan (e_referenced set) get a second chance and are moved to the tail
 * of the list instead of being reclaimed.
 */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned int nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned int shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with a hash table of 2^bucket_bits buckets.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	int bucket_count = 1 << bucket_bits;
	int i;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	module_put(THIS_MODULE);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);
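
/*
 * Lifecycle sketch for a filesystem module (the bucket count of 2^10 is just
 * an example value):
 *
 *	struct mb_cache *cache;
 *
 *	cache = mb_cache_create(10);		// 1024 hash buckets
 *	if (!cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(cache);		// on unmount / module exit
 */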

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	BUG_ON(!mb_entry_cache);
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");
