linux/include/linux/backing-dev-defs.h
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
        WB_registered,          /* bdi_register() was done */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
};
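
/*
 * Illustrative sketch (not part of the original header): these bits live
 * in bdi_writeback->state and must be manipulated with atomic bitops,
 * e.g.:
 *
 *	set_bit(WB_registered, &wb->state);
 *	if (test_bit(WB_writeback_running, &wb->state))
 *		return;
 *	clear_bit(WB_has_dirty_io, &wb->state);
 */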

enum wb_congested_state {
        WB_async_congested,     /* The async (write) queue is getting full */
        WB_sync_congested,      /* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
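
/*
 * Illustrative sketch (my_congested() and struct my_dev are
 * hypothetical): a stacking driver such as md/dm reports congestion for
 * its component devices through a congested_fn; @bdi_bits carries the
 * WB_*_congested bits the caller is interested in:
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *		int ret = 0;
 *
 *		if (my_dev_async_full(dev))
 *			ret |= bdi_bits & (1 << WB_async_congested);
 *		if (my_dev_sync_full(dev))
 *			ret |= bdi_bits & (1 << WB_sync_congested);
 *		return ret;
 *	}
 */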

enum wb_stat_item {
        WB_RECLAIMABLE,
        WB_WRITEBACK,
        WB_DIRTIED,
        WB_WRITTEN,
        NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
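
/*
 * Illustrative sketch (not part of this header): the per-wb statistics
 * are percpu counters updated with this batch to bound the error of
 * cheap, lockless updates, roughly:
 *
 *	__percpu_counter_add(&wb->stat[WB_RECLAIMABLE], 1, WB_STAT_BATCH);
 *	nr = percpu_counter_read_positive(&wb->stat[WB_WRITEBACK]);
 */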

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
        unsigned long state;            /* WB_[a]sync_congested flags */
        atomic_t refcnt;                /* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
        struct backing_dev_info *bdi;   /* the associated bdi */
        int blkcg_id;                   /* ID of the associated blkcg */
        struct rb_node rb_node;         /* on bdi->cgwb_congested_tree */
#endif
};
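
/*
 * Illustrative sketch (free_congested() is a hypothetical helper; the
 * real put path also unlinks the node from bdi->cgwb_congested_tree
 * under a lock):
 *
 *	atomic_inc(&congested->refcnt);
 *	...
 *	if (atomic_dec_and_test(&congested->refcnt))
 *		free_congested(congested);
 */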

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch
 * so that a new wb for the combination can be created.
 */
struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */

        unsigned long state;            /* Always use atomic bitops on this */
        unsigned long last_old_flush;   /* last old data flush */

        struct list_head b_dirty;       /* dirty inodes */
        struct list_head b_io;          /* parked for writeback */
        struct list_head b_more_io;     /* parked for more writeback */
        struct list_head b_dirty_time;  /* time stamps are dirty */
        spinlock_t list_lock;           /* protects the b_* lists */

        struct percpu_counter stat[NR_WB_STAT_ITEMS];

        struct bdi_writeback_congested *congested;

        unsigned long bw_time_stamp;    /* last time write bw is updated */
        unsigned long dirtied_stamp;
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
        unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

        /*
         * The base dirty throttle rate, recalculated every 200ms.
         * All the bdi tasks' dirty rate will be curbed under it.
         * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
         * in small steps and is much smoother and more stable than the
         * latter.
         */
        unsigned long dirty_ratelimit;
        unsigned long balanced_dirty_ratelimit;

        struct fprop_local_percpu completions;
        int dirty_exceeded;

        spinlock_t work_lock;           /* protects work_list & dwork scheduling */
        struct list_head work_list;
        struct delayed_work dwork;      /* work item used for writeback */

        struct list_head bdi_node;      /* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
        struct percpu_ref refcnt;       /* used only for !root wb's */
        struct fprop_local_percpu memcg_completions;
        struct cgroup_subsys_state *memcg_css; /* the associated memcg */
        struct cgroup_subsys_state *blkcg_css; /* and blkcg */
        struct list_head memcg_node;    /* anchored at memcg->cgwb_list */
        struct list_head blkcg_node;    /* anchored at blkcg->cgwb_list */

        union {
                struct work_struct release_work;
                struct rcu_head rcu;
        };
#endif
};
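
/*
 * Illustrative sketch of the lookup rule described above (simplified,
 * with assumed helpers cgwb_kill() and cgwb_create(), and locking
 * omitted): a wb found by memcg ID is only used if its blkcg still
 * matches; a stale wb is unlinked so a new one can be created.
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != blkcg_css) {
 *		cgwb_kill(wb);
 *		wb = NULL;
 *	}
 *	if (!wb)
 *		wb = cgwb_create(bdi, memcg_css, blkcg_css);
 */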

struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
        unsigned int capabilities; /* Device capabilities */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */

        char *name;

        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        /*
         * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
         * any dirty wbs, which bdi_has_dirty() depends on.
         */
        atomic_long_t tot_write_bandwidth;

        struct bdi_writeback wb;  /* the root writeback info for this bdi */
        struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
        atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#else
        struct bdi_writeback_congested *wb_congested;
#endif
        wait_queue_head_t wb_waitq;

        struct device *dev;
        struct device *owner;

        struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
#endif
};

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        set_wb_congested(bdi->wb.congested, sync);
}
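
/*
 * Illustrative usage (assumption, not from this header): a driver that
 * does not provide a congested_fn flags its root wb directly, passing
 * one of the BLK_RW_* constants above as @sync:
 *
 *	set_bdi_congested(bdi, BLK_RW_SYNC);
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_SYNC);
 */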

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                return percpu_ref_tryget(&wb->refcnt);
        return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
        return percpu_ref_is_dying(&wb->refcnt);
}
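
/*
 * Illustrative usage (assumption): a cgwb found under RCU must be pinned
 * before use and released afterwards; the helpers above short-circuit
 * for the root wb, which is never freed:
 *
 *	rcu_read_lock();
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_id);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */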

#else   /* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
        return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
        return false;
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

#endif  /* __LINUX_BACKING_DEV_DEFS_H */