#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/*
	 * Number of references this write holds on c->prio_blocked;
	 * released when the write completes.
	 */
	int			prio_blocked;
};
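
/*
 * An in-memory (cached) btree node; 'key' below is the key/pointer to the
 * node's data on disk.
 */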
struct btree {
	/* Entry in c->bucket_hash (see for_each_cached_btree()) */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Bumped when the node is write locked, see rw_lock()/rw_unlock() */
	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;
	struct btree		*parent;

	struct mutex		write_lock;

	unsigned long		flags;
	uint16_t		written;
	uint8_t			level;

	struct btree_keys	keys;

	/* For outstanding btree writes; protects write_idx */
	struct closure		io;
	struct semaphore	io_mutex;

	struct list_head	list;
	struct delayed_work	work;

	/* Double buffered: see btree_current_write()/btree_prev_write() */
	struct btree_write	writes[2];
	struct bio		*bio;
};
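
/*
 * Per-node state bits live in btree->flags; BTREE_FLAG(foo) generates
 * btree_node_foo() and set_btree_node_foo() helpers for each bit.
 */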
#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
	BTREE_NODE_journal_flush,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
BTREE_FLAG(journal_flush);

/*
 * Each node's two btree_writes are used alternately: BTREE_NODE_write_idx
 * selects the one currently accumulating state for the next write, while
 * the other may still be in flight to disk.
 */
static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->keys.set->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}

/* Re-arm the incremental gc trigger at 1/16th of the cache's sectors */
static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}

void bkey_put(struct cache_set *c, struct bkey *k);

/* Iterate over every btree node currently in the in-memory node cache */
#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

struct btree_op {
	/* Wait entry for c->btree_cache_wait (see bcache_btree_root()) */
	wait_queue_entry_t	wait;

	/* Highest btree level at which we take write locks */
	short			lock;

	unsigned int		insert_collision:1;
};

/* State for the multithreaded initial btree check (bch_btree_check()) */
struct btree_check_state;
struct btree_check_info {
	struct btree_check_state	*state;
	struct task_struct		*thread;
	int				result;
};

#define BCH_BTR_CHKTHREAD_MAX	64
struct btree_check_state {
	struct cache_set		*c;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct btree_check_info		infos[BCH_BTR_CHKTHREAD_MAX];
};

/*
 * write_lock_level is the highest btree level at which a traversal will take
 * write locks (see bcache_btree()); -1 means read locks only.
 */
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}

void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *b, struct closure *parent);

void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent);
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent);

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key);

int bch_gc_thread_start(struct cache_set *c);
void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);

static inline void wake_up_gc(struct cache_set *c)
{
	wake_up(&c->gc_wait);
}

static inline void force_wake_up_gc(struct cache_set *c)
{
	/*
	 * The gc thread only runs once sectors_to_gc has gone negative, so
	 * waking it is not enough on its own: set sectors_to_gc to -1 first
	 * so the thread's should-run check passes, then wake it up.
	 */
	atomic_set(&c->sectors_to_gc, -1);
	wake_up_gc(c);
}
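
/*
 * bcache_btree(fn, key, b, op, ...) recurses one level down the btree: it
 * looks up and locks the child node that @key points to, calls
 * bch_btree_<fn>() on it with the remaining arguments, and unlocks the child
 * before returning the result.  The child is write locked iff its level is
 * at or below op->lock.
 */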
#define bcache_btree(fn, key, b, op, ...)				\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})
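
/*
 * bcache_btree_root(fn, c, op, ...) calls bch_btree_<fn>() on the root node,
 * locking it according to op->lock.  On -EINTR (e.g. the root changed
 * underneath us or the node cache had to be cannibalized) it drops the
 * cannibalize lock and retries; any wait queued on btree_cache_wait is
 * finished before returning.
 */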
#define bcache_btree_root(fn, c, op, ...)				\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})

/* Return values for btree_map_{nodes,keys}_fn callbacks */
#define MAP_DONE	0
#define MAP_CONTINUE	1

/* Flags for __bch_btree_map_nodes() */
#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1
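
/*
 * The btree map functions walk the tree starting from @from, calling @fn on
 * each node (btree_map_nodes_fn) or on each key (btree_map_keys_fn).  @fn
 * returns MAP_CONTINUE to keep walking or MAP_DONE to stop.
 */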
typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
				struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags);
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags);

typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);

void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end);
void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred);
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);

#endif