/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state, stored in extent_state::state. */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
#define EXTENT_DELALLOC_NEW	(1U << 14)
#define EXTENT_DO_ACCOUNTING	(EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING)
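
/*
 * These bits are plain masks meant to be OR-ed together and handed to the
 * set/clear/test helpers declared below. Illustrative sketch only (not part
 * of this header); the io_tree used here is an assumed field of a
 * struct btrfs_inode:
 *
 *	clear_extent_bit(&inode->io_tree, start, end,
 *			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL);
 */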

/*
 * Aliases used only in a per-device chunk allocation tree. They reuse bit
 * values from the extent state bits above, so the two sets must not be
 * mixed in the same tree.
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG

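/*
 * Illustrative sketch of the CHUNK_* aliases (not part of this header);
 * they are meant for a per-device allocation tree such as the assumed
 * device->alloc_state, never for an inode io tree. The end offset is
 * inclusive:
 *
 *	set_extent_bits(&device->alloc_state, chunk_offset,
 *			chunk_offset + chunk_size - 1, CHUNK_ALLOCATED);
 */
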
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_SELFTEST,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of the IO_TREE_* values */
	u8 owner;

	spinlock_t lock;
	const struct extent_io_ops *ops;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	wait_queue_head_t wq;
	refcount_t refs;
	unsigned state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
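
/*
 * Typical tree lifecycle, as a hedged sketch (fs_info and the chosen owner
 * are assumptions; any IO_TREE_* value from the enum above works):
 *
 *	struct extent_io_tree tree;
 *
 *	extent_io_tree_init(fs_info, &tree, IO_TREE_SELFTEST, NULL);
 *	...
 *	extent_io_tree_release(&tree);
 */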

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
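
/*
 * Range locking sketch (illustrative only; the io_tree is an assumed
 * btrfs_inode field). lock_extent() blocks until [start, end] is held,
 * try_lock_extent() returns 1 when it got the lock and 0 otherwise, and
 * unlock_extent() below drops it:
 *
 *	lock_extent(&inode->io_tree, start, end);
 *	... read or modify the locked range ...
 *	unlock_extent(&inode->io_tree, start, end);
 */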

int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, unsigned bits, int contig);
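
/*
 * Hedged sketch of count_range_bits(): it sums the bytes between *start and
 * search_end that carry the given bits (capped at max_bytes) and moves
 * *start to the first matching offset. The tree and the limits below are
 * assumptions:
 *
 *	u64 first = 0;
 *	u64 delalloc_bytes;
 *
 *	delalloc_bytes = count_range_bits(&inode->io_tree, &first, (u64)-1,
 *					  (u64)-1, EXTENT_DELALLOC, 0);
 */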

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, int filled,
		   struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     unsigned bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
					      u64 start, u64 end,
					      struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				  GFP_ATOMIC, NULL);
}
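
/*
 * The *_cached variants let a caller keep the extent_state found while
 * locking, so the matching unlock does not have to search the tree again.
 * Sketch only; the io_tree is an assumed btrfs_inode field:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent_bits(&inode->io_tree, start, end, &cached);
 *	...
 *	unlock_extent_cached(&inode->io_tree, start, end, &cached);
 */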

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, unsigned bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   unsigned bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   unsigned bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
				  u64 end, unsigned bits)
{
	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}
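
/*
 * Minimal set/test/clear round trip in the spirit of the selftests (sketch
 * only; the tree is assumed to have been set up with extent_io_tree_init(),
 * SZ_4K comes from linux/sizes.h):
 *
 *	set_extent_bits(&tree, 0, SZ_4K - 1, EXTENT_DIRTY);
 *	if (test_range_bit(&tree, 0, SZ_4K - 1, EXTENT_DIRTY, 1, NULL))
 *		... the whole range carries EXTENT_DIRTY ...
 *	clear_extent_bits(&tree, 0, SZ_4K - 1, EXTENT_DIRTY);
 */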

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				  cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
				   u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
			      NULL, mask);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, unsigned clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, unsigned int extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      NULL, cached_state, GFP_NOFS);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      NULL, cached_state, GFP_NOFS);
}
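
/*
 * Hedged sketch of flagging a freshly dirtied file range as delalloc
 * (the io_tree and the zero extra_bits are assumptions):
 *
 *	struct extent_state *cached = NULL;
 *
 *	set_extent_delalloc(&inode->io_tree, start, end, 0, &cached);
 */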

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
				 u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
			      GFP_NOFS);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
				      u64 end,
				      struct extent_state **cached_state,
				      gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
			      cached_state, mask);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, unsigned bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, unsigned bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, unsigned bits);
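
/*
 * Sketch of walking every range that carries a bit, relying on
 * find_first_extent_bit() returning 0 for as long as it finds something
 * (everything except the helpers above is an assumption):
 *
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(&tree, start, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process the inclusive range [found_start, found_end] ...
 *		start = found_end + 1;
 *	}
 */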
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
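
/*
 * btrfs_find_delalloc_range() is the bool-returning search used when
 * flushing delalloc: on success it narrows *start/*end to a contiguous
 * delalloc range of at most max_bytes. Sketch with assumed names:
 *
 *	struct extent_state *cached = NULL;
 *	u64 dstart = page_offset, dend = page_offset;
 *
 *	if (btrfs_find_delalloc_range(&inode->io_tree, &dstart, &dend,
 *				      BTRFS_MAX_EXTENT_SIZE, &cached))
 *		... [dstart, dend] is a delalloc range ...
 */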

/* IO failure records, used to track and repair failed reads. */
int get_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record **failrec);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
				  u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
				struct io_failure_record **failrec_ret);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);
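
/*
 * Hedged sketch of looking up a failure record, assuming the per-inode
 * failure tree is the io_failure_tree member of struct btrfs_inode:
 *
 *	struct io_failure_record *failrec;
 *
 *	if (!get_state_failrec(&inode->io_failure_tree, start, &failrec))
 *		... a read of this offset already failed once ...
 */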

#endif /* BTRFS_EXTENT_IO_TREE_H */