1
2
3#include <linux/slab.h>
4#include "ctree.h"
5#include "subpage.h"
6
7int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
8 struct page *page, enum btrfs_subpage_type type)
9{
10 struct btrfs_subpage *subpage = NULL;
11 int ret;
12
13
14
15
16
17 if (page->mapping)
18 ASSERT(PageLocked(page));
19
20 if (fs_info->sectorsize == PAGE_SIZE || PagePrivate(page))
21 return 0;
22
23 ret = btrfs_alloc_subpage(fs_info, &subpage, type);
24 if (ret < 0)
25 return ret;
26 attach_page_private(page, subpage);
27 return 0;
28}
29
30void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
31 struct page *page)
32{
33 struct btrfs_subpage *subpage;
34
35
36 if (fs_info->sectorsize == PAGE_SIZE || !PagePrivate(page))
37 return;
38
39 subpage = (struct btrfs_subpage *)detach_page_private(page);
40 ASSERT(subpage);
41 btrfs_free_subpage(subpage);
42}
43
44int btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
45 struct btrfs_subpage **ret,
46 enum btrfs_subpage_type type)
47{
48 if (fs_info->sectorsize == PAGE_SIZE)
49 return 0;
50
51 *ret = kzalloc(sizeof(struct btrfs_subpage), GFP_NOFS);
52 if (!*ret)
53 return -ENOMEM;
54 spin_lock_init(&(*ret)->lock);
55 if (type == BTRFS_SUBPAGE_METADATA)
56 atomic_set(&(*ret)->eb_refs, 0);
57 else
58 atomic_set(&(*ret)->readers, 0);
59 return 0;
60}
61
/*
 * Free a btrfs_subpage structure allocated by btrfs_alloc_subpage().
 * A NULL @subpage is tolerated (kfree() ignores NULL).
 */
void btrfs_free_subpage(struct btrfs_subpage *subpage)
{
	kfree(subpage);
}
66
67
68
69
70
71
72
73
74
75
76void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
77 struct page *page)
78{
79 struct btrfs_subpage *subpage;
80
81 if (fs_info->sectorsize == PAGE_SIZE)
82 return;
83
84 ASSERT(PagePrivate(page) && page->mapping);
85 lockdep_assert_held(&page->mapping->private_lock);
86
87 subpage = (struct btrfs_subpage *)page->private;
88 atomic_inc(&subpage->eb_refs);
89}
90
91void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
92 struct page *page)
93{
94 struct btrfs_subpage *subpage;
95
96 if (fs_info->sectorsize == PAGE_SIZE)
97 return;
98
99 ASSERT(PagePrivate(page) && page->mapping);
100 lockdep_assert_held(&page->mapping->private_lock);
101
102 subpage = (struct btrfs_subpage *)page->private;
103 ASSERT(atomic_read(&subpage->eb_refs));
104 atomic_dec(&subpage->eb_refs);
105}
106
/*
 * Sanity-check a byte range against the page it belongs to: private data
 * must be attached and the range must be sector-aligned.
 */
static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
				 struct page *page, u64 start, u32 len)
{
	/* Basic checks: subpage attached, range sector-aligned. */
	ASSERT(PagePrivate(page) && page->private);
	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));

	/*
	 * The containment check only works for mapped pages; unmapped pages
	 * (e.g. dummy extent buffer pages) have no meaningful page_offset().
	 */
	if (page->mapping)
		ASSERT(page_offset(page) <= start &&
		       start + len <= page_offset(page) + PAGE_SIZE);
}
122
123void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
124 struct page *page, u64 start, u32 len)
125{
126 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
127 const int nbits = len >> fs_info->sectorsize_bits;
128 int ret;
129
130 btrfs_subpage_assert(fs_info, page, start, len);
131
132 ret = atomic_add_return(nbits, &subpage->readers);
133 ASSERT(ret == nbits);
134}
135
136void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
137 struct page *page, u64 start, u32 len)
138{
139 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
140 const int nbits = len >> fs_info->sectorsize_bits;
141
142 btrfs_subpage_assert(fs_info, page, start, len);
143 ASSERT(atomic_read(&subpage->readers) >= nbits);
144 if (atomic_sub_and_test(nbits, &subpage->readers))
145 unlock_page(page);
146}
147
148
149
150
151
152
/*
 * Convert the in-page range [start, start + len) to a per-sector bitmap.
 * Bit N of the result corresponds to the N-th sector inside the page.
 */
static u16 btrfs_subpage_calc_bitmap(const struct btrfs_fs_info *fs_info,
				     struct page *page, u64 start, u32 len)
{
	const int bit_start = offset_in_page(start) >> fs_info->sectorsize_bits;
	const int nbits = len >> fs_info->sectorsize_bits;

	btrfs_subpage_assert(fs_info, page, start, len);

	/*
	 * nbits can be 16 here (a full page of sectors), so (1 << nbits)
	 * would overflow a u16.  Do the shift in unsigned long, then
	 * truncate the masked result back down to u16.
	 */
	return (u16)(((1UL << nbits) - 1) << bit_start);
}
168
169void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info,
170 struct page *page, u64 start, u32 len)
171{
172 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
173 const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
174 unsigned long flags;
175
176 spin_lock_irqsave(&subpage->lock, flags);
177 subpage->uptodate_bitmap |= tmp;
178 if (subpage->uptodate_bitmap == U16_MAX)
179 SetPageUptodate(page);
180 spin_unlock_irqrestore(&subpage->lock, flags);
181}
182
183void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info,
184 struct page *page, u64 start, u32 len)
185{
186 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
187 const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
188 unsigned long flags;
189
190 spin_lock_irqsave(&subpage->lock, flags);
191 subpage->uptodate_bitmap &= ~tmp;
192 ClearPageUptodate(page);
193 spin_unlock_irqrestore(&subpage->lock, flags);
194}
195
196void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info,
197 struct page *page, u64 start, u32 len)
198{
199 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
200 const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
201 unsigned long flags;
202
203 spin_lock_irqsave(&subpage->lock, flags);
204 subpage->error_bitmap |= tmp;
205 SetPageError(page);
206 spin_unlock_irqrestore(&subpage->lock, flags);
207}
208
209void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info,
210 struct page *page, u64 start, u32 len)
211{
212 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
213 const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len);
214 unsigned long flags;
215
216 spin_lock_irqsave(&subpage->lock, flags);
217 subpage->error_bitmap &= ~tmp;
218 if (subpage->error_bitmap == 0)
219 ClearPageError(page);
220 spin_unlock_irqrestore(&subpage->lock, flags);
221}
222
223
224
225
226
/*
 * Unified test helpers for the subpage status bitmaps.
 *
 * btrfs_subpage_test_##name() returns true only when EVERY sector in the
 * [start, start + len) range has its bit set in the name##_bitmap.
 */
#define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name)				\
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
	const u16 tmp = btrfs_subpage_calc_bitmap(fs_info, page, start, len); \
	unsigned long flags;						\
	bool ret;							\
									\
	spin_lock_irqsave(&subpage->lock, flags);			\
	ret = ((subpage->name##_bitmap & tmp) == tmp);			\
	spin_unlock_irqrestore(&subpage->lock, flags);			\
	return ret;							\
}
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate);
IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error);
243
244
245
246
247
248
/*
 * Unified page-level helpers that dispatch between the regular full-page
 * flag operations and the subpage bitmap operations.
 *
 * A NULL fs_info is tolerated (the unlikely(!fs_info) checks below);
 * presumably this covers selftest callers that have no fs_info set up --
 * in that case, and whenever sectorsize == PAGE_SIZE, the plain page-flag
 * functions are used directly.
 */
#define IMPLEMENT_BTRFS_PAGE_OPS(name, set_page_func, clear_page_func,	\
			  test_page_func)				\
void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info,		\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		set_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_set_##name(fs_info, page, start, len);		\
}									\
void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE) {	\
		clear_page_func(page);					\
		return;							\
	}								\
	btrfs_subpage_clear_##name(fs_info, page, start, len);		\
}									\
bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info,	\
		struct page *page, u64 start, u32 len)			\
{									\
	if (unlikely(!fs_info) || fs_info->sectorsize == PAGE_SIZE)	\
		return test_page_func(page);				\
	return btrfs_subpage_test_##name(fs_info, page, start, len);	\
}
IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate,
			 PageUptodate);
IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError);
279