1
2#ifndef __LINUX_BACKING_DEV_DEFS_H
3#define __LINUX_BACKING_DEV_DEFS_H
4
5#include <linux/list.h>
6#include <linux/radix-tree.h>
7#include <linux/rbtree.h>
8#include <linux/spinlock.h>
9#include <linux/percpu_counter.h>
10#include <linux/percpu-refcount.h>
11#include <linux/flex_proportions.h>
12#include <linux/timer.h>
13#include <linux/workqueue.h>
14#include <linux/kref.h>
15#include <linux/refcount.h>
16
17struct page;
18struct device;
19struct dentry;
20
21
22
23
/* bdi_writeback->state flags; always manipulated with atomic bitops */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* writeback is in progress */
	WB_has_dirty_io,	/* dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
30
/* bit numbers for bdi_writeback->congested */
enum wb_congested_state {
	WB_async_congested,	/* the async (write) queue is getting full */
	WB_sync_congested,	/* the sync queue is getting full */
};
35
/* indices into bdi_writeback->stat[] per-cpu counters */
enum wb_stat_item {
	WB_RECLAIMABLE,		/* pages that could be reclaimed after writeback */
	WB_WRITEBACK,		/* pages currently under writeback */
	WB_DIRTIED,		/* total pages dirtied against this wb */
	WB_WRITTEN,		/* total pages written out by this wb */
	NR_WB_STAT_ITEMS	/* number of entries, sizes the stat[] array */
};
43
/* per-cpu counter batch size for the wb stat counters; scales with CPU count */
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
45
46
47
48
/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,		/* background threshold exceeded */
	WB_REASON_VMSCAN,		/* initiated by memory reclaim */
	WB_REASON_SYNC,			/* sync(2) and friends */
	WB_REASON_PERIODIC,		/* periodic kupdate-style flush */
	WB_REASON_LAPTOP_TIMER,		/* laptop-mode timer expired */
	WB_REASON_FS_FREE_SPACE,	/* filesystem wants free space */
	/*
	 * There is no bdi forker thread any more and works are done
	 * by emergency worker, however, this is TPs userland visible
	 * and we'll be exposing exactly the same information,
	 * so it has a mismatch name.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,	/* foreign-inode flush */

	WB_REASON_MAX,
};
67
/* completion tracking for one or more queued writeback works */
struct wb_completion {
	atomic_t cnt;			/* outstanding works + 1 (initial ref) */
	wait_queue_head_t *waitq;	/* woken when cnt drops to zero */
};
72
#define __WB_COMPLETION_INIT(_waitq) \
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi) __WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi) \
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (bdi_writeback) is served by its embedded bdi->wb.
 *
 * With cgroup writeback, multiple wbs (dirty inode contexts) can exist
 * per bdi, one for each memcg/blkcg combination that dirties inodes on
 * the bdi; the root wb stays embedded in the bdi itself.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* WB_* flags; use atomic bitops */
	unsigned long last_old_flush;	/* last old-data flush, in jiffies */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	atomic_t writeback_inodes;	/* number of inodes under writeback */
	struct percpu_counter stat[NR_WB_STAT_ITEMS]; /* see enum wb_stat_item */

	unsigned long congested;	/* WB_[a]sync_congested flags */

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, periodically recalculated.  All
	 * dirtiers of this wb are throttled under dirty_ratelimit, which
	 * tracks balanced_dirty_ratelimit in small steps and is therefore
	 * much smoother/more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions; /* fraction of recent completions */
	int dirty_exceeded;		/* non-zero while over dirty limits */
	enum wb_reason start_all_reason; /* reason recorded for WB_start_all */

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;	/* queued wb_writeback_work items */
	struct delayed_work dwork;	/* work item used for writeback */
	struct delayed_work bw_dwork;	/* work item used for bandwidth estimate */

	unsigned long dirty_sleep;	/* last wait, in jiffies */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions; /* per-memcg completions */
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* NOTE(review): presumably anchored at the memcg's cgwb list — confirm */
	struct list_head blkcg_node;	/* NOTE(review): presumably anchored at the blkcg's cgwb list — confirm */
	struct list_head b_attached;	/* attached inodes, protected by list_lock */
	struct list_head offline_node;	/* anchored at the offline-wbs list */

	union {
		struct work_struct release_work; /* async release path */
		struct rcu_head rcu;		 /* RCU-delayed free */
	};
#endif
};
168
/* per-device writeback context; owns the root wb and, with cgroup
 * writeback enabled, the tree of per-cgroup wbs */
struct backing_dev_info {
	u64 id;				/* global bdi id */
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;	/* link in the global bdi list */
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */

	struct kref refcnt;		/* reference counter for the structure */
	unsigned int capabilities;	/* device capabilities */
	unsigned int min_ratio;		/* minimum share of the dirty limit */
	unsigned int max_ratio, max_prop_frac; /* maximum share / prop fraction */

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes.  > 0 if
	 * there are any dirty wbs.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
	wait_queue_head_t wb_waitq;	/* woken by wb_completion (see macros above) */

	struct device *dev;		/* sysfs device, NULL until registered */
	char dev_name[64];		/* cached device name */
	struct device *owner;		/* optional owning device */

	struct timer_list laptop_mode_wb_timer; /* delayed laptop-mode flush */

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;	/* debugfs directory for this bdi */
#endif
};
206
/* sync/async selectors for the bdi congestion helpers below */
enum {
	BLK_RW_ASYNC = 0,
	BLK_RW_SYNC = 1,
};
211
/* set/clear the WB_[a]sync_congested bit on the bdi's root wb;
 * @sync selects BLK_RW_SYNC or BLK_RW_ASYNC */
void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);

/* opaque state handed back by the wb locking helpers so the caller can
 * undo exactly what was taken */
struct wb_lock_cookie {
	bool locked;		/* whether the lock was actually taken */
	unsigned long flags;	/* saved irq flags to restore on unlock */
};
219
220#ifdef CONFIG_CGROUP_WRITEBACK
221
222
223
224
225
226static inline bool wb_tryget(struct bdi_writeback *wb)
227{
228 if (wb != &wb->bdi->wb)
229 return percpu_ref_tryget(&wb->refcnt);
230 return true;
231}
232
233
234
235
236
237static inline void wb_get(struct bdi_writeback *wb)
238{
239 if (wb != &wb->bdi->wb)
240 percpu_ref_get(&wb->refcnt);
241}
242
243
244
245
246
247
248static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
249{
250 if (WARN_ON_ONCE(!wb->bdi)) {
251
252
253
254
255 return;
256 }
257
258 if (wb != &wb->bdi->wb)
259 percpu_ref_put_many(&wb->refcnt, nr);
260}
261
262
263
264
265
/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 *
 * Convenience wrapper around wb_put_many() for a single reference.
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	wb_put_many(wb, 1);
}
270
271
272
273
274
275
276
/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb's percpu_ref is in the process of being killed.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}
281
282#else
283
/*
 * !CONFIG_CGROUP_WRITEBACK: only the bdi-embedded root wb exists and it
 * is never refcounted, so all the refcounting helpers collapse to no-ops.
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;	/* always succeeds: the root wb lives with the bdi */
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;	/* the root wb never dies before the bdi */
}
305
306#endif
307
308#endif
309