1
2#ifndef __LINUX_BACKING_DEV_DEFS_H
3#define __LINUX_BACKING_DEV_DEFS_H
4
5#include <linux/list.h>
6#include <linux/radix-tree.h>
7#include <linux/rbtree.h>
8#include <linux/spinlock.h>
9#include <linux/percpu_counter.h>
10#include <linux/percpu-refcount.h>
11#include <linux/flex_proportions.h>
12#include <linux/timer.h>
13#include <linux/workqueue.h>
14#include <linux/kref.h>
15#include <linux/refcount.h>
16
17struct page;
18struct device;
19struct dentry;
20
21
22
23
/*
 * Bits for bdi_writeback.state; manipulated with atomic bitops.
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
30
/*
 * Bits for bdi_writeback_congested.state, selected via the @sync argument
 * of the set/clear_wb_congested() helpers below.
 */
enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};
35
/*
 * Congestion callback type installed in backing_dev_info; called with
 * (congested_data, bdi congestion bits) and returns the congested bits.
 * NOTE(review): used by stacked drivers (md/dm) — confirm against callers.
 */
typedef int (congested_fn)(void *, int);
37
/* Per-wb percpu counters, indexing bdi_writeback.stat[] */
enum wb_stat_item {
	WB_RECLAIMABLE,		/* reclaimable (dirty) pages */
	WB_WRITEBACK,		/* pages currently under writeback */
	WB_DIRTIED,		/* pages dirtied, cumulative */
	WB_WRITTEN,		/* pages written back, cumulative */
	NR_WB_STAT_ITEMS	/* number of entries, keep last */
};
45
/* percpu_counter batch size for the wb stat counters; scales with CPU count */
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
47
48
49
50
/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,		/* background threshold exceeded */
	WB_REASON_VMSCAN,		/* triggered by memory reclaim */
	WB_REASON_SYNC,			/* sync(2) and friends */
	WB_REASON_PERIODIC,		/* periodic (kupdate-style) flush */
	WB_REASON_LAPTOP_TIMER,		/* laptop-mode timer expired */
	WB_REASON_FREE_MORE_MEM,	/* need to free memory */
	WB_REASON_FS_FREE_SPACE,	/* filesystem wants free space */
	/*
	 * There is no bdi forker thread any more and works are done
	 * by an emergency worker; however, this value is visible to
	 * userland via tracepoints, so it keeps its historical
	 * (mismatched) name.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,			/* number of reasons, keep last */
};
69
70
71
72
73
74
75
76
/*
 * For cgroup writeback, multiple wb's may map to the same blkcg.  Those
 * wb's can operate mostly independently but should share the congested
 * state.  To facilitate such sharing, the congested state is tracked
 * using this struct, which is created on demand, indexed by blkcg ID on
 * its bdi, and refcounted.
 */
struct bdi_writeback_congested {
	unsigned long state;		/* WB_[a]sync_congested flags */
	refcount_t refcnt;		/* nr of attached wb's and blkg */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
					 * on bdi unregistration */
	int blkcg_id;			/* ID of the associated blkcg */
	struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
#endif
};
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (bdi_writeback) is served by its embedded bdi->wb.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* wb_state bits; use atomic bitops */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];	/* wb_stat_item counters */

	struct bdi_writeback_congested *congested;	/* shared congested state */

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, periodically recalculated.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;	/* writeback completion events */
	int dirty_exceeded;		/* nonzero while over the dirty limit */
	enum wb_reason start_all_reason; /* reason recorded with WB_start_all */

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;	/* pending wb_writeback_work items */
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {				/* release path never needs both */
		struct work_struct release_work;	/* deferred release */
		struct rcu_head rcu;			/* RCU-delayed free */
	};
#endif
};
166
/*
 * Per-device writeback state.  Every block device (and some filesystems)
 * owns one; it embeds the root wb and, with cgroup writeback, indexes the
 * per-cgroup wbs.
 */
struct backing_dev_info {
	struct list_head bdi_list;	/* node on the global bdi list */
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */
	congested_fn *congested_fn;	/* function pointer if device is md/dm */
	void *congested_data;		/* auxiliary data for congested_fn */

	const char *name;		/* bdi name for identification */

	struct kref refcnt;		/* reference counter for the structure */
	unsigned int capabilities;	/* device capabilities (BDI_CAP_*) */
	unsigned int min_ratio;		/* minimum dirty-throttle share, percent */
	unsigned int max_ratio, max_prop_frac;	/* max share and its fprop fraction */

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if
	 * there are any dirty wbs.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct rb_root cgwb_congested_tree; /* their congested states */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
	struct bdi_writeback_congested *wb_congested; /* the sole congested state */
#endif
	wait_queue_head_t wb_waitq;	/* waitq for wb shutdown/completion waiters */

	struct device *dev;		/* sysfs device */
	struct device *owner;		/* owning device, if any */

	struct timer_list laptop_mode_wb_timer;	/* delayed laptop-mode flush */

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;	/* this bdi's debugfs directory */
#endif
};
208
/* values for the @sync argument of the congested helpers below */
enum {
	BLK_RW_ASYNC = 0,
	BLK_RW_SYNC = 1,
};
213
/*
 * Clear/set the WB_[a]sync_congested bit of @congested selected by @sync;
 * implemented out of line (presumably mm/backing-dev.c — verify).
 */
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
216
/* Clear the @sync congestion bit on @bdi's root wb's congested state. */
static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	clear_wb_congested(bdi->wb.congested, sync);
}
221
/* Set the @sync congestion bit on @bdi's root wb's congested state. */
static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	set_wb_congested(bdi->wb.congested, sync);
}
226
/*
 * Opaque cookie recording whether a lock was taken (@locked) and the saved
 * irq @flags, filled in and consumed by wb locking helpers defined
 * elsewhere — confirm against the begin/end pair that uses it.
 */
struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
231
232#ifdef CONFIG_CGROUP_WRITEBACK
233
234
235
236
237
238static inline bool wb_tryget(struct bdi_writeback *wb)
239{
240 if (wb != &wb->bdi->wb)
241 return percpu_ref_tryget(&wb->refcnt);
242 return true;
243}
244
245
246
247
248
249static inline void wb_get(struct bdi_writeback *wb)
250{
251 if (wb != &wb->bdi->wb)
252 percpu_ref_get(&wb->refcnt);
253}
254
255
256
257
258
259static inline void wb_put(struct bdi_writeback *wb)
260{
261 if (WARN_ON_ONCE(!wb->bdi)) {
262
263
264
265
266 return;
267 }
268
269 if (wb != &wb->bdi->wb)
270 percpu_ref_put(&wb->refcnt);
271}
272
273
274
275
276
277
278
/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb's percpu refcount has been killed, i.e. the wb is
 * being shut down and drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}
283
284#else
285
/* !CONFIG_CGROUP_WRITEBACK: only the unrefcounted root wb exists — always succeeds */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}
290
/* !CONFIG_CGROUP_WRITEBACK: root wb is not refcounted — no-op */
static inline void wb_get(struct bdi_writeback *wb)
{
}
294
/* !CONFIG_CGROUP_WRITEBACK: root wb is not refcounted — no-op */
static inline void wb_put(struct bdi_writeback *wb)
{
}
298
/* !CONFIG_CGROUP_WRITEBACK: the root wb lives as long as its bdi — never dying */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}
303
304#endif
305
306#endif
307