linux/include/linux/backing-dev-defs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
        WB_registered,          /* bdi_register() was done */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
        WB_start_all,           /* nr_pages == 0 (all) work pending */
};
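
/*
 * A hedged sketch, not part of this header: wb->state (see struct
 * bdi_writeback below, whose field comment requires atomic bitops) holds
 * the bits above and is manipulated like so:
 *
 *	if (test_bit(WB_registered, &wb->state))
 *		set_bit(WB_has_dirty_io, &wb->state);
 *	clear_bit(WB_writeback_running, &wb->state);
 */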

enum wb_congested_state {
        WB_async_congested,     /* The async (write) queue is getting full */
        WB_sync_congested,      /* The sync queue is getting full */
};

typedef int (congested_fn)(void *, int);
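
/*
 * Hedged sketch (hypothetical driver code, modeled on the md/dm pattern):
 * a stacking driver points bdi->congested_fn at its own callback so that
 * congestion queries can be forwarded to the underlying devices.  The int
 * argument is a mask of 1 << WB_[a]sync_congested bits and the return
 * value is the subset that is currently congested.
 *
 *	static int my_stacked_congested(void *congested_data, int bdi_bits)
 *	{
 *		struct my_stack *s = congested_data;	// hypothetical type
 *
 *		// hypothetical helper walking the lower devices
 *		return my_lower_devices_congested(s, bdi_bits);
 *	}
 */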

enum wb_stat_item {
        WB_RECLAIMABLE,
        WB_WRITEBACK,
        WB_DIRTIED,
        WB_WRITTEN,
        NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
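
/*
 * Hedged sketch: the wb_stat_item counters live in the percpu counters of
 * struct bdi_writeback below, batched by WB_STAT_BATCH, and are read with
 * the usual percpu_counter helpers (cf. wb_stat() in
 * <linux/backing-dev.h>):
 *
 *	static inline s64 wb_stat_sketch(struct bdi_writeback *wb,
 *					 enum wb_stat_item item)
 *	{
 *		return percpu_counter_read_positive(&wb->stat[item]);
 *	}
 */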

/*
 * why some writeback work was initiated
 */
enum wb_reason {
        WB_REASON_BACKGROUND,
        WB_REASON_VMSCAN,
        WB_REASON_SYNC,
        WB_REASON_PERIODIC,
        WB_REASON_LAPTOP_TIMER,
        WB_REASON_FREE_MORE_MEM,
        WB_REASON_FS_FREE_SPACE,
        /*
         * There is no bdi forker thread any more and work is done by
         * an emergency worker; however, the tracepoints carrying this
         * reason are visible to userland and must keep exposing exactly
         * the same information, hence the mismatched name.
         */
        WB_REASON_FORKER_THREAD,

        WB_REASON_MAX,
};

/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked using
 * the following struct which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
        unsigned long state;            /* WB_[a]sync_congested flags */
        refcount_t refcnt;              /* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
        struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
                                         * on bdi unregistration. For memcg-wb
                                         * internal use only! */
        int blkcg_id;                   /* ID of the associated blkcg */
        struct rb_node rb_node;         /* on bdi->cgwb_congested_tree */
#endif
};
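
/*
 * Hedged sketch: testing the shared congested state for a wb, modeled on
 * wb_congested() in <linux/backing-dev.h>; @cong_bits is a mask of
 * 1 << WB_[a]sync_congested bits.
 *
 *	static inline bool wb_congested_sketch(struct bdi_writeback *wb,
 *					       int cong_bits)
 *	{
 *		return wb->congested->state & cong_bits;
 *	}
 */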

/*
 * Each wb (bdi_writeback) can perform writeback operations and is
 * measured and throttled independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch
 * so that a new wb for the combination can be created.
 */
struct bdi_writeback {
        struct backing_dev_info *bdi;   /* our parent bdi */

        unsigned long state;            /* Always use atomic bitops on this */
        unsigned long last_old_flush;   /* last old data flush */

        struct list_head b_dirty;       /* dirty inodes */
        struct list_head b_io;          /* parked for writeback */
        struct list_head b_more_io;     /* parked for more writeback */
        struct list_head b_dirty_time;  /* time stamps are dirty */
        spinlock_t list_lock;           /* protects the b_* lists */

        struct percpu_counter stat[NR_WB_STAT_ITEMS];

        struct bdi_writeback_congested *congested;

        unsigned long bw_time_stamp;    /* last time write bw is updated */
        unsigned long dirtied_stamp;
        unsigned long written_stamp;    /* pages written at bw_time_stamp */
        unsigned long write_bandwidth;  /* the estimated write bandwidth */
        unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

        /*
         * The base dirty throttle rate, recalculated every 200ms.  All
         * the bdi tasks' dirty rates will be curbed under it.
         * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
         * in small steps and is much smoother and more stable than the
         * latter.
         */
        unsigned long dirty_ratelimit;
        unsigned long balanced_dirty_ratelimit;

        struct fprop_local_percpu completions;
        int dirty_exceeded;
        enum wb_reason start_all_reason;

        spinlock_t work_lock;           /* protects work_list & dwork scheduling */
        struct list_head work_list;
        struct delayed_work dwork;      /* work item used for writeback */

        unsigned long dirty_sleep;      /* last wait */

        struct list_head bdi_node;      /* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
        struct percpu_ref refcnt;       /* used only for !root wb's */
        struct fprop_local_percpu memcg_completions;
        struct cgroup_subsys_state *memcg_css; /* the associated memcg */
        struct cgroup_subsys_state *blkcg_css; /* and blkcg */
        struct list_head memcg_node;    /* anchored at memcg->cgwb_list */
        struct list_head blkcg_node;    /* anchored at blkcg->cgwb_list */

        union {
                struct work_struct release_work;
                struct rcu_head rcu;
        };
#endif
};
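
/*
 * Hedged sketch of how the dwork/work_lock pair above is driven, modeled
 * on wb_wakeup() in mm/backing-dev.c; assumes bdi_wq, the writeback
 * workqueue declared in <linux/backing-dev.h>:
 *
 *	static void wb_wakeup_sketch(struct bdi_writeback *wb)
 *	{
 *		spin_lock_bh(&wb->work_lock);
 *		if (test_bit(WB_registered, &wb->state))
 *			mod_delayed_work(bdi_wq, &wb->dwork, 0);
 *		spin_unlock_bh(&wb->work_lock);
 *	}
 */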

struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
        unsigned long io_pages; /* max allowed IO size */
        congested_fn *congested_fn; /* Function pointer if device is md/dm */
        void *congested_data;   /* Pointer to aux data for congested func */

        const char *name;

        struct kref refcnt;     /* Reference counter for the structure */
        unsigned int capabilities; /* Device capabilities */
        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

        /*
         * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
         * any dirty wbs, which is depended upon by bdi_has_dirty_io().
         */
        atomic_long_t tot_write_bandwidth;

        struct bdi_writeback wb;  /* the root writeback info for this bdi */
        struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
        struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
        struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
        struct bdi_writeback_congested *wb_congested;
#endif
        wait_queue_head_t wb_waitq;

        struct device *dev;
        struct device *owner;

        struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
#endif
};
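
/*
 * Hedged sketch: per the tot_write_bandwidth comment above, "does this bdi
 * have dirty io" reduces to a single atomic read (cf. bdi_has_dirty_io()
 * in <linux/backing-dev.h>):
 *
 *	static inline bool bdi_has_dirty_io_sketch(struct backing_dev_info *bdi)
 *	{
 *		return atomic_long_read(&bdi->tot_write_bandwidth);
 *	}
 */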

enum {
        BLK_RW_ASYNC    = 0,
        BLK_RW_SYNC     = 1,
};

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);

static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        clear_wb_congested(bdi->wb.congested, sync);
}

static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        set_wb_congested(bdi->wb.congested, sync);
}
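
/*
 * Hedged usage sketch (hypothetical driver code, not from this file): a
 * driver that owns a bdi flips the congested bits as its queue fills and
 * drains; clearing also wakes anyone sleeping in congestion_wait() /
 * wait_iff_congested():
 *
 *	if (my_queue_nearly_full(q))		// hypothetical helper
 *		set_bdi_congested(q->bdi, BLK_RW_ASYNC);
 *	else
 *		clear_bdi_congested(q->bdi, BLK_RW_ASYNC);
 */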

struct wb_lock_cookie {
        bool locked;
        unsigned long flags;
};
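
/*
 * Hedged sketch of the intended wb_lock_cookie usage, assuming the
 * unlocked_inode_to_wb_begin()/end() helpers declared in
 * <linux/backing-dev.h>:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... operate on wb without racing inode-to-wb switching ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */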

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                return percpu_ref_tryget(&wb->refcnt);
        return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
        if (wb != &wb->bdi->wb)
                percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
        if (WARN_ON_ONCE(!wb->bdi)) {
                /*
                 * A driver bug might cause a file to be removed before bdi was
                 * initialized.
                 */
                return;
        }

        if (wb != &wb->bdi->wb)
                percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
        return percpu_ref_is_dying(&wb->refcnt);
}
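
/*
 * Hedged usage sketch: the pairing callers follow with the helpers above.
 * A lookup path takes a temporary reference with wb_tryget() (which always
 * succeeds for the embedded root wb) and drops it with wb_put():
 *
 *	if (wb_tryget(wb)) {
 *		... use wb ...
 *		wb_put(wb);
 *	}
 */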

#else   /* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
        return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
        return false;
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

#endif  /* __LINUX_BACKING_DEV_DEFS_H */