/* linux/fs/f2fs/shrinker.c */
   1/*
   2 * f2fs shrinker support
   3 *   the basic infra was copied from fs/ubifs/shrinker.c
   4 *
   5 * Copyright (c) 2015 Motorola Mobility
   6 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12#include <linux/fs.h>
  13#include <linux/f2fs_fs.h>
  14
  15#include "f2fs.h"
  16
  17static LIST_HEAD(f2fs_list);
  18static DEFINE_SPINLOCK(f2fs_list_lock);
  19static unsigned int shrinker_run_no;
  20
  21static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
  22{
  23        return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
  24}
  25
  26static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
  27{
  28        if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
  29                return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
  30        return 0;
  31}
  32
  33static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
  34{
  35        return atomic_read(&sbi->total_zombie_tree) +
  36                                atomic_read(&sbi->total_ext_node);
  37}
  38
/*
 * f2fs_shrink_count - report how many cache objects could be reclaimed
 * @shrink: the registered shrinker (unused)
 * @sc: shrink control from the MM layer (unused)
 *
 * Walks every f2fs instance on f2fs_list and sums the entries that
 * f2fs_shrink_scan() would be able to free: extent cache objects,
 * clean NAT entries and surplus free nids.  Instances whose
 * umount_mutex cannot be taken (i.e. currently unmounting) are
 * skipped.  Returns the total count.
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/*
		 * Holding umount_mutex keeps this sbi alive, so the list
		 * lock can be dropped while counting its caches below.
		 */
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		/* retake the list lock before advancing and releasing sbi */
		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
  74
/*
 * f2fs_shrink_scan - reclaim cache objects under memory pressure
 * @shrink: the registered shrinker (unused)
 * @sc: shrink control; sc->nr_to_scan caps the amount of work
 *
 * Round-robins over the f2fs instances on f2fs_list, shrinking the
 * extent cache, clean NAT entries and free nids of each until
 * sc->nr_to_scan objects have been freed.  Every visited sbi is
 * stamped with the current run number and rotated to the list tail,
 * so the next scan starts with the least-recently-shrunk instance.
 * Returns the number of objects freed.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/*
	 * Skip 0 on counter wraparound: a run number of 0 could not be
	 * distinguished from an sbi that was never visited.
	 */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already visited in this run: we have gone full circle */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/* umount_mutex pins sbi while the list lock is dropped */
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries; take at most half the budget */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate so future scans don't always hit the same sbi first */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
 125
/*
 * f2fs_join_shrinker - make @sbi visible to the global f2fs shrinker
 *
 * Links @sbi into f2fs_list so f2fs_shrink_count()/f2fs_shrink_scan()
 * will consider its caches.  Presumably called during mount; pair with
 * f2fs_leave_shrinker() on teardown.
 */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
 132
/*
 * f2fs_leave_shrinker - detach @sbi from the global f2fs shrinker
 *
 * Drops all remaining extent cache objects, then unlinks @sbi from
 * f2fs_list so the shrinker can no longer reach it.  Counterpart of
 * f2fs_join_shrinker().
 */
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	/* free every extent cache entry before going invisible */
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}
 141