/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H
#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/*
	 * If btree_split() frees a btree node, it writes a new pointer to
	 * that btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release
	 * the refcount that btree_split() took.
	 */
	int			prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long		accessed;
	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;
	struct btree		*parent;

	struct mutex		write_lock;

	unsigned long		flags;
	uint16_t		written;	/* would be nice to kill */
	uint8_t			level;

	struct btree_keys	keys;

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure		io;
	struct semaphore	io_mutex;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];
	struct bio		*bio;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
	BTREE_NODE_journal_flush,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
BTREE_FLAG(journal_flush);
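
/*
 * For example, BTREE_FLAG(dirty) above expands to:
 *
 *	static inline bool btree_node_dirty(struct btree *b)
 *	{ return test_bit(BTREE_NODE_dirty, &b->flags); }
 *
 *	static inline void set_btree_node_dirty(struct btree *b)
 *	{ set_bit(BTREE_NODE_dirty, &b->flags); }
 */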

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return b->keys.set->data;
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
}
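
/*
 * c->sb.bucket_size * c->nbuckets is the cache set's size in sectors, so
 * this arms gc to be triggered again once roughly 1/16th of the cache has
 * been written: the write path decrements sectors_to_gc as data goes by
 * and wakes the gc thread when it goes negative (see force_wake_up_gc()
 * below for the sectors_to_gc < 0 convention).
 */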
198
199void bkey_put(struct cache_set *c, struct bkey *k);
200
201
202
203#define for_each_cached_btree(b, c, iter) \
204 for (iter = 0; \
205 iter < ARRAY_SIZE((c)->bucket_hash); \
206 iter++) \
207 hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
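
/*
 * A sketch of walking every btree node currently in the in-memory cache;
 * hlist_for_each_entry_rcu() requires rcu_read_lock() (or equivalent
 * protection) to be held across the iteration:
 *
 *	struct btree *b;
 *	unsigned int i;
 *
 *	rcu_read_lock();
 *	for_each_cached_btree(b, c, i)
 *		pr_info("cached node at level %u\n", b->level);
 *	rcu_read_unlock();
 */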

/* Recursing down the btree */

struct btree_op {
	/* for waiting on btree reserve in btree_split() */
	wait_queue_entry_t	wait;

	/* Btree level at which we start taking write locks */
	short			lock;

	unsigned int		insert_collision:1;
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
{
	memset(op, 0, sizeof(struct btree_op));
	init_wait(&op->wait);
	op->lock = write_lock_level;
}
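
/*
 * A minimal usage sketch: a read-only traversal passes a write_lock_level
 * below any real node's level, so only read locks are ever taken:
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init(&op, -1);
 */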

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}
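
/*
 * Note that b->seq is bumped both when a write lock is taken and when it
 * is released, so a caller that samples b->seq and later finds it
 * unchanged knows no writer held the node in between. A read-side sketch:
 *
 *	rw_lock(false, b, b->level);
 *	... look up keys in b->keys ...
 *	rw_unlock(false, b);
 */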

void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *b, struct closure *parent);

void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent);
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent);

int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key);

int bch_gc_thread_start(struct cache_set *c);
void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);

static inline void wake_up_gc(struct cache_set *c)
{
	wake_up(&c->gc_wait);
}

static inline void force_wake_up_gc(struct cache_set *c)
{
	/*
	 * The garbage collection thread only runs while sectors_to_gc < 0,
	 * so wake_up_gc() alone won't start it if sectors_to_gc is still
	 * non-negative. Therefore sectors_to_gc is set to -1 here before
	 * waking the gc thread, so that gc_should_run() gives it a chance
	 * to run. Only a chance: before gc_should_run() is evaluated,
	 * c->sectors_to_gc may be set to some other positive value again,
	 * so this routine doesn't guarantee the gc thread actually runs.
	 */
	atomic_set(&c->sectors_to_gc, -1);
	wake_up_gc(c);
}

/* Return values for btree_map_nodes_fn / btree_map_keys_fn callbacks */
#define MAP_DONE	0
#define MAP_CONTINUE	1

#define MAP_ALL_NODES	0
#define MAP_LEAF_NODES	1

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
}

static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
					   struct cache_set *c,
					   struct bkey *from,
					   btree_map_nodes_fn *fn)
{
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}
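
/*
 * A minimal sketch of a btree_map_nodes_fn callback (count_op and
 * count_nodes_fn are hypothetical, not part of this API): embed the
 * struct btree_op and recover the container with container_of(), then
 * return MAP_CONTINUE to keep walking or MAP_DONE to stop:
 *
 *	struct count_op {
 *		struct btree_op	op;
 *		unsigned int	nodes;
 *	};
 *
 *	static int count_nodes_fn(struct btree_op *b_op, struct btree *b)
 *	{
 *		struct count_op *op = container_of(b_op, struct count_op, op);
 *
 *		op->nodes++;
 *		return MAP_CONTINUE;
 *	}
 */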

typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
				struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags);

typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);

void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end);
void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred);
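
/*
 * A consumer-loop sketch modeled on the moving-gc/writeback pattern; pred
 * is a keybuf_pred_fn supplied by the caller, and the keybuf is assumed to
 * be embedded somewhere the caller owns:
 *
 *	struct keybuf buf;
 *	struct keybuf_key *w;
 *
 *	bch_keybuf_init(&buf);
 *	while ((w = bch_keybuf_next_rescan(c, &buf, &MAX_KEY, pred))) {
 *		... operate on w->key ...
 *		bch_keybuf_del(&buf, w);
 *	}
 */
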
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);

#endif	/* _BCACHE_BTREE_H */