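/*
 * Copyright (C) 2010 Kent Overstreet
 *
 * bcache btree implementation: the in-memory btree node cache, btree node
 * IO, garbage collection, key insertion and node splitting, and the
 * map/keybuf helpers built on top of the btree.
 */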
24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
38
39#include <trace/events/bcache.h>
40
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93
94#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
95
96#define PTR_HASH(c, k) \
97 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
98
99#define insert_lock(s, b) ((b)->level <= (s)->lock)
100
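/*
 * btree(fn, key, b, op, ...) - recurse one level down the btree.
 *
 * Looks up the child node that @key points to (write locked if the child's
 * level is <= op->lock, read locked otherwise), calls bch_btree_##fn() on it
 * and then drops the lock. Evaluates to the callback's return value, or to
 * PTR_ERR() of the child if it couldn't be read.
 */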
119#define btree(fn, key, b, op, ...) \
120({ \
121 int _r, l = (b)->level - 1; \
122 bool _w = l <= (op)->lock; \
123 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
124 _w, b); \
125 if (!IS_ERR(_child)) { \
126 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
127 rw_unlock(_w, _child); \
128 } else \
129 _r = PTR_ERR(_child); \
130 _r; \
131})
132
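/*
 * btree_root(fn, c, op, ...) - call bch_btree_##fn() on the btree root.
 *
 * Retakes the root lock and retries from the top whenever the callback
 * returns -EINTR (e.g. after the root was split or the traversal was
 * otherwise invalidated); each time around it also drops the cannibalize
 * lock and finishes any wait on btree_cache_wait.
 */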
139#define btree_root(fn, c, op, ...) \
140({ \
141 int _r = -EINTR; \
142 do { \
143 struct btree *_b = (c)->root; \
144 bool _w = insert_lock(op, _b); \
145 rw_lock(_w, _b, _b->level); \
146 if (_b == (c)->root && \
147 _w == insert_lock(op, _b)) { \
148 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
149 } \
150 rw_unlock(_w, _b); \
151 bch_cannibalize_unlock(c); \
152 if (_r == -EINTR) \
153 schedule(); \
154 } while (_r == -EINTR); \
155 \
156 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
157 _r; \
158})
159
160static inline struct bset *write_block(struct btree *b)
161{
162 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
163}
164
165static void bch_btree_init_next(struct btree *b)
166{
167
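	/* Interior nodes are sorted fully here; leaves are sorted lazily */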
168 if (b->level && b->keys.nsets)
169 bch_btree_sort(&b->keys, &b->c->sort);
170 else
171 bch_btree_sort_lazy(&b->keys, &b->c->sort);
172
173 if (b->written < btree_blocks(b))
174 bch_bset_init_next(&b->keys, write_block(b),
175 bset_magic(&b->c->sb));
176
177}
178
179
180
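/* Drop the bucket pin reference taken for each pointer in @k */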
181void bkey_put(struct cache_set *c, struct bkey *k)
182{
183 unsigned i;
184
185 for (i = 0; i < KEY_PTRS(k); i++)
186 if (ptr_available(c, k, i))
187 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
188}
189
190
191
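/*
 * Btree IO.
 *
 * The bset checksum is seeded from the node's first pointer and covers
 * everything in the bset after the 64 bit csum field itself.
 */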
192static uint64_t btree_csum_set(struct btree *b, struct bset *i)
193{
194 uint64_t crc = b->key.ptr[0];
195 void *data = (void *) i + 8, *end = bset_bkey_last(i);
196
197 crc = bch_crc64_update(crc, data, end - data);
198 return crc ^ 0xffffffffffffffffULL;
199}
200
201void bch_btree_node_read_done(struct btree *b)
202{
203 const char *err = "bad btree header";
204 struct bset *i = btree_bset_first(b);
205 struct btree_iter *iter;
206
207 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
208 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
209 iter->used = 0;
210
211#ifdef CONFIG_BCACHE_DEBUG
212 iter->b = &b->keys;
213#endif
214
215 if (!i->seq)
216 goto err;
217
218 for (;
219 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
220 i = write_block(b)) {
221 err = "unsupported bset version";
222 if (i->version > BCACHE_BSET_VERSION)
223 goto err;
224
225 err = "bad btree header";
226 if (b->written + set_blocks(i, block_bytes(b->c)) >
227 btree_blocks(b))
228 goto err;
229
230 err = "bad magic";
231 if (i->magic != bset_magic(&b->c->sb))
232 goto err;
233
234 err = "bad checksum";
235 switch (i->version) {
236 case 0:
237 if (i->csum != csum_set(i))
238 goto err;
239 break;
240 case BCACHE_BSET_VERSION:
241 if (i->csum != btree_csum_set(b, i))
242 goto err;
243 break;
244 }
245
246 err = "empty set";
247 if (i != b->keys.set[0].data && !i->keys)
248 goto err;
249
250 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
251
252 b->written += set_blocks(i, block_bytes(b->c));
253 }
254
255 err = "corrupted btree";
256 for (i = write_block(b);
257 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
258 i = ((void *) i) + block_bytes(b->c))
259 if (i->seq == b->keys.set[0].data->seq)
260 goto err;
261
262 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
263
264 i = b->keys.set[0].data;
265 err = "short btree key";
266 if (b->keys.set[0].size &&
267 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
268 goto err;
269
270 if (b->written < btree_blocks(b))
271 bch_bset_init_next(&b->keys, write_block(b),
272 bset_magic(&b->c->sb));
273out:
274 mempool_free(iter, b->c->fill_iter);
275 return;
276err:
277 set_btree_node_io_error(b);
278 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
279 err, PTR_BUCKET_NR(b->c, &b->key, 0),
280 bset_block_offset(b, i), i->keys);
281 goto out;
282}
283
284static void btree_node_read_endio(struct bio *bio)
285{
286 struct closure *cl = bio->bi_private;
287 closure_put(cl);
288}
289
290static void bch_btree_node_read(struct btree *b)
291{
292 uint64_t start_time = local_clock();
293 struct closure cl;
294 struct bio *bio;
295
296 trace_bcache_btree_read(b);
297
298 closure_init_stack(&cl);
299
300 bio = bch_bbio_alloc(b->c);
301 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
302 bio->bi_end_io = btree_node_read_endio;
303 bio->bi_private = &cl;
304 bio->bi_opf = REQ_OP_READ | REQ_META;
305
306 bch_bio_map(bio, b->keys.set[0].data);
307
308 bch_submit_bbio(bio, b->c, &b->key, 0);
309 closure_sync(&cl);
310
311 if (bio->bi_status)
312 set_btree_node_io_error(b);
313
314 bch_bbio_free(bio, b->c);
315
316 if (btree_node_io_error(b))
317 goto err;
318
319 bch_btree_node_read_done(b);
320 bch_time_stats_update(&b->c->btree_read_time, start_time);
321
322 return;
323err:
324 bch_cache_set_error(b->c, "io error reading bucket %zu",
325 PTR_BUCKET_NR(b->c, &b->key, 0));
326}
327
328static void btree_complete_write(struct btree *b, struct btree_write *w)
329{
330 if (w->prio_blocked &&
331 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
332 wake_up_allocators(b->c);
333
334 if (w->journal) {
335 atomic_dec_bug(w->journal);
336 __closure_wake_up(&b->c->journal.wait);
337 }
338
339 w->prio_blocked = 0;
340 w->journal = NULL;
341}
342
343static void btree_node_write_unlock(struct closure *cl)
344{
345 struct btree *b = container_of(cl, struct btree, io);
346
347 up(&b->io_mutex);
348}
349
350static void __btree_node_write_done(struct closure *cl)
351{
352 struct btree *b = container_of(cl, struct btree, io);
353 struct btree_write *w = btree_prev_write(b);
354
355 bch_bbio_free(b->bio, b->c);
356 b->bio = NULL;
357 btree_complete_write(b, w);
358
359 if (btree_node_dirty(b))
360 schedule_delayed_work(&b->work, 30 * HZ);
361
362 closure_return_with_destructor(cl, btree_node_write_unlock);
363}
364
365static void btree_node_write_done(struct closure *cl)
366{
367 struct btree *b = container_of(cl, struct btree, io);
368
369 bio_free_pages(b->bio);
370 __btree_node_write_done(cl);
371}
372
373static void btree_node_write_endio(struct bio *bio)
374{
375 struct closure *cl = bio->bi_private;
376 struct btree *b = container_of(cl, struct btree, io);
377
378 if (bio->bi_status)
379 set_btree_node_io_error(b);
380
381 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
382 closure_put(cl);
383}
384
385static void do_btree_node_write(struct btree *b)
386{
387 struct closure *cl = &b->io;
388 struct bset *i = btree_bset_last(b);
389 BKEY_PADDED(key) k;
390
391 i->version = BCACHE_BSET_VERSION;
392 i->csum = btree_csum_set(b, i);
393
394 BUG_ON(b->bio);
395 b->bio = bch_bbio_alloc(b->c);
396
397 b->bio->bi_end_io = btree_node_write_endio;
398 b->bio->bi_private = cl;
399 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
400 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
401 bch_bio_map(b->bio, i);
402
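	/*
	 * Only the bset being appended is written out: point the key at the
	 * sector offset of @i within the node.
	 */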
418 bkey_copy(&k.key, &b->key);
419 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420 bset_sector_offset(&b->keys, i));
421
422 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
423 int j;
424 struct bio_vec *bv;
425 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
426
427 bio_for_each_segment_all(bv, b->bio, j)
428 memcpy(page_address(bv->bv_page),
429 base + j * PAGE_SIZE, PAGE_SIZE);
430
431 bch_submit_bbio(b->bio, b->c, &k.key, 0);
432
433 continue_at(cl, btree_node_write_done, NULL);
434 } else {
435
436 b->bio->bi_vcnt = 0;
437 bch_bio_map(b->bio, i);
438
439 bch_submit_bbio(b->bio, b->c, &k.key, 0);
440
441 closure_sync(cl);
442 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
443 }
444}
445
446void __bch_btree_node_write(struct btree *b, struct closure *parent)
447{
448 struct bset *i = btree_bset_last(b);
449
450 lockdep_assert_held(&b->write_lock);
451
452 trace_bcache_btree_write(b);
453
454 BUG_ON(current->bio_list);
455 BUG_ON(b->written >= btree_blocks(b));
456 BUG_ON(b->written && !i->keys);
457 BUG_ON(btree_bset_first(b)->seq != i->seq);
458 bch_check_keys(&b->keys, "writing");
459
460 cancel_delayed_work(&b->work);
461
462
463 down(&b->io_mutex);
464 closure_init(&b->io, parent ?: &b->c->cl);
465
466 clear_bit(BTREE_NODE_dirty, &b->flags);
467 change_bit(BTREE_NODE_write_idx, &b->flags);
468
469 do_btree_node_write(b);
470
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
473
474 b->written += set_blocks(i, block_bytes(b->c));
475}
476
477void bch_btree_node_write(struct btree *b, struct closure *parent)
478{
479 unsigned nsets = b->keys.nsets;
480
481 lockdep_assert_held(&b->lock);
482
483 __bch_btree_node_write(b, parent);
484
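	/*
	 * Only verify if the node had more than one set before the write and
	 * has since been sorted down to a single set.
	 */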
489 if (nsets && !b->keys.nsets)
490 bch_btree_verify(b);
491
492 bch_btree_init_next(b);
493}
494
495static void bch_btree_node_write_sync(struct btree *b)
496{
497 struct closure cl;
498
499 closure_init_stack(&cl);
500
501 mutex_lock(&b->write_lock);
502 bch_btree_node_write(b, &cl);
503 mutex_unlock(&b->write_lock);
504
505 closure_sync(&cl);
506}
507
508static void btree_node_write_work(struct work_struct *w)
509{
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
512 mutex_lock(&b->write_lock);
513 if (btree_node_dirty(b))
514 __bch_btree_node_write(b, NULL);
515 mutex_unlock(&b->write_lock);
516}
517
518static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
519{
520 struct bset *i = btree_bset_last(b);
521 struct btree_write *w = btree_current_write(b);
522
523 lockdep_assert_held(&b->write_lock);
524
525 BUG_ON(!b->written);
526 BUG_ON(!i->keys);
527
528 if (!btree_node_dirty(b))
529 schedule_delayed_work(&b->work, 30 * HZ);
530
531 set_btree_node_dirty(b);
532
533 if (journal_ref) {
534 if (w->journal &&
535 journal_pin_cmp(b->c, w->journal, journal_ref)) {
536 atomic_dec_bug(w->journal);
537 w->journal = NULL;
538 }
539
540 if (!w->journal) {
541 w->journal = journal_ref;
542 atomic_inc(w->journal);
543 }
544 }
545
546
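	/* Force a write if the set is getting too big */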
547 if (set_bytes(i) > PAGE_SIZE - 48 &&
548 !current->bio_list)
549 bch_btree_node_write(b, NULL);
550}
551
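/*
 * Btree in-memory node cache ("mca"): keep a reserve of nodes around so that
 * btree splits and inserts can always make forward progress.
 */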
557#define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559#define mca_can_free(c) \
560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
561
562static void mca_data_free(struct btree *b)
563{
564 BUG_ON(b->io_mutex.count != 1);
565
566 bch_btree_keys_free(&b->keys);
567
568 b->c->btree_cache_used--;
569 list_move(&b->list, &b->c->btree_cache_freed);
570}
571
572static void mca_bucket_free(struct btree *b)
573{
574 BUG_ON(btree_node_dirty(b));
575
576 b->key.ptr[0] = 0;
577 hlist_del_init_rcu(&b->hash);
578 list_move(&b->list, &b->c->btree_cache_freeable);
579}
580
581static unsigned btree_order(struct bkey *k)
582{
583 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
584}
585
586static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
587{
588 if (!bch_btree_keys_alloc(&b->keys,
589 max_t(unsigned,
590 ilog2(b->c->btree_pages),
591 btree_order(k)),
592 gfp)) {
593 b->c->btree_cache_used++;
594 list_move(&b->list, &b->c->btree_cache);
595 } else {
596 list_move(&b->list, &b->c->btree_cache_freed);
597 }
598}
599
600static struct btree *mca_bucket_alloc(struct cache_set *c,
601 struct bkey *k, gfp_t gfp)
602{
603 struct btree *b = kzalloc(sizeof(struct btree), gfp);
604 if (!b)
605 return NULL;
606
607 init_rwsem(&b->lock);
608 lockdep_set_novalidate_class(&b->lock);
609 mutex_init(&b->write_lock);
610 lockdep_set_novalidate_class(&b->write_lock);
611 INIT_LIST_HEAD(&b->list);
612 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
613 b->c = c;
614 sema_init(&b->io_mutex, 1);
615
616 mca_data_alloc(b, k, gfp);
617 return b;
618}
619
620static int mca_reap(struct btree *b, unsigned min_order, bool flush)
621{
622 struct closure cl;
623
624 closure_init_stack(&cl);
625 lockdep_assert_held(&b->c->bucket_lock);
626
627 if (!down_write_trylock(&b->lock))
628 return -ENOMEM;
629
630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
631
632 if (b->keys.page_order < min_order)
633 goto out_unlock;
634
635 if (!flush) {
636 if (btree_node_dirty(b))
637 goto out_unlock;
638
639 if (down_trylock(&b->io_mutex))
640 goto out_unlock;
641 up(&b->io_mutex);
642 }
643
644 mutex_lock(&b->write_lock);
645 if (btree_node_dirty(b))
646 __bch_btree_node_write(b, &cl);
647 mutex_unlock(&b->write_lock);
648
649 closure_sync(&cl);
650
651
652 down(&b->io_mutex);
653 up(&b->io_mutex);
654
655 return 0;
656out_unlock:
657 rw_unlock(true, b);
658 return -ENOMEM;
659}
660
661static unsigned long bch_mca_scan(struct shrinker *shrink,
662 struct shrink_control *sc)
663{
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
665 struct btree *b, *t;
666 unsigned long i, nr = sc->nr_to_scan;
667 unsigned long freed = 0;
668
669 if (c->shrinker_disabled)
670 return SHRINK_STOP;
671
672 if (c->btree_cache_alloc_lock)
673 return SHRINK_STOP;
674
675
676 if (sc->gfp_mask & __GFP_IO)
677 mutex_lock(&c->bucket_lock);
678 else if (!mutex_trylock(&c->bucket_lock))
679 return -1;
680
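	/*
	 * nr_to_scan is in pages; convert to btree nodes and never scan more
	 * than we can free without dipping into the reserve.
	 */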
688 nr /= c->btree_pages;
689 nr = min_t(unsigned long, nr, mca_can_free(c));
690
691 i = 0;
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
693 if (freed >= nr)
694 break;
695
696 if (++i > 3 &&
697 !mca_reap(b, 0, false)) {
698 mca_data_free(b);
699 rw_unlock(true, b);
700 freed++;
701 }
702 }
703
704 for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
705 if (list_empty(&c->btree_cache))
706 goto out;
707
708 b = list_first_entry(&c->btree_cache, struct btree, list);
709 list_rotate_left(&c->btree_cache);
710
711 if (!b->accessed &&
712 !mca_reap(b, 0, false)) {
713 mca_bucket_free(b);
714 mca_data_free(b);
715 rw_unlock(true, b);
716 freed++;
717 } else
718 b->accessed = 0;
719 }
720out:
721 mutex_unlock(&c->bucket_lock);
722 return freed;
723}
724
725static unsigned long bch_mca_count(struct shrinker *shrink,
726 struct shrink_control *sc)
727{
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
729
730 if (c->shrinker_disabled)
731 return 0;
732
733 if (c->btree_cache_alloc_lock)
734 return 0;
735
736 return mca_can_free(c) * c->btree_pages;
737}
738
739void bch_btree_cache_free(struct cache_set *c)
740{
741 struct btree *b;
742 struct closure cl;
743 closure_init_stack(&cl);
744
745 if (c->shrink.list.next)
746 unregister_shrinker(&c->shrink);
747
748 mutex_lock(&c->bucket_lock);
749
750#ifdef CONFIG_BCACHE_DEBUG
751 if (c->verify_data)
752 list_move(&c->verify_data->list, &c->btree_cache);
753
754 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
755#endif
756
757 list_splice(&c->btree_cache_freeable,
758 &c->btree_cache);
759
760 while (!list_empty(&c->btree_cache)) {
761 b = list_first_entry(&c->btree_cache, struct btree, list);
762
763 if (btree_node_dirty(b))
764 btree_complete_write(b, btree_current_write(b));
765 clear_bit(BTREE_NODE_dirty, &b->flags);
766
767 mca_data_free(b);
768 }
769
770 while (!list_empty(&c->btree_cache_freed)) {
771 b = list_first_entry(&c->btree_cache_freed,
772 struct btree, list);
773 list_del(&b->list);
774 cancel_delayed_work_sync(&b->work);
775 kfree(b);
776 }
777
778 mutex_unlock(&c->bucket_lock);
779}
780
781int bch_btree_cache_alloc(struct cache_set *c)
782{
783 unsigned i;
784
785 for (i = 0; i < mca_reserve(c); i++)
786 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
787 return -ENOMEM;
788
789 list_splice_init(&c->btree_cache,
790 &c->btree_cache_freeable);
791
792#ifdef CONFIG_BCACHE_DEBUG
793 mutex_init(&c->verify_lock);
794
795 c->verify_ondisk = (void *)
796 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
797
798 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
799
800 if (c->verify_data &&
801 c->verify_data->keys.set->data)
802 list_del_init(&c->verify_data->list);
803 else
804 c->verify_data = NULL;
805#endif
806
807 c->shrink.count_objects = bch_mca_count;
808 c->shrink.scan_objects = bch_mca_scan;
809 c->shrink.seeks = 4;
810 c->shrink.batch = c->btree_pages * 2;
811
812 if (register_shrinker(&c->shrink))
813 pr_warn("bcache: %s: could not register shrinker",
814 __func__);
815
816 return 0;
817}
818
819
820
821static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
822{
823 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
824}
825
826static struct btree *mca_find(struct cache_set *c, struct bkey *k)
827{
828 struct btree *b;
829
830 rcu_read_lock();
831 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
832 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
833 goto out;
834 b = NULL;
835out:
836 rcu_read_unlock();
837 return b;
838}
839
840static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
841{
842 struct task_struct *old;
843
844 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
845 if (old && old != current) {
846 if (op)
847 prepare_to_wait(&c->btree_cache_wait, &op->wait,
848 TASK_UNINTERRUPTIBLE);
849 return -EINTR;
850 }
851
852 return 0;
853}
854
855static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
856 struct bkey *k)
857{
858 struct btree *b;
859
860 trace_bcache_btree_cache_cannibalize(c);
861
862 if (mca_cannibalize_lock(c, op))
863 return ERR_PTR(-EINTR);
864
865 list_for_each_entry_reverse(b, &c->btree_cache, list)
866 if (!mca_reap(b, btree_order(k), false))
867 return b;
868
869 list_for_each_entry_reverse(b, &c->btree_cache, list)
870 if (!mca_reap(b, btree_order(k), true))
871 return b;
872
873 WARN(1, "btree cache cannibalize failed\n");
874 return ERR_PTR(-ENOMEM);
875}
876
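/*
 * Only one thread may cannibalize other cached btree nodes at a time; drop
 * the cannibalize lock here and wake up anyone waiting for it.
 */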
883static void bch_cannibalize_unlock(struct cache_set *c)
884{
885 if (c->btree_cache_alloc_lock == current) {
886 c->btree_cache_alloc_lock = NULL;
887 wake_up(&c->btree_cache_wait);
888 }
889}
890
891static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
892 struct bkey *k, int level)
893{
894 struct btree *b;
895
896 BUG_ON(current->bio_list);
897
898 lockdep_assert_held(&c->bucket_lock);
899
900 if (mca_find(c, k))
901 return NULL;
902
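	/*
	 * First try a node whose data buffers are still allocated (put here
	 * by mca_bucket_free()).
	 */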
906 list_for_each_entry(b, &c->btree_cache_freeable, list)
907 if (!mca_reap(b, btree_order(k), false))
908 goto out;
909
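	/*
	 * Then try a node whose data was freed (mca_data_free()) and
	 * reallocate buffers for it.
	 */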
913 list_for_each_entry(b, &c->btree_cache_freed, list)
914 if (!mca_reap(b, 0, false)) {
915 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
916 if (!b->keys.set[0].data)
917 goto err;
918 else
919 goto out;
920 }
921
922 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b)
924 goto err;
925
926 BUG_ON(!down_write_trylock(&b->lock));
927 if (!b->keys.set->data)
928 goto err;
929out:
930 BUG_ON(b->io_mutex.count != 1);
931
932 bkey_copy(&b->key, k);
933 list_move(&b->list, &c->btree_cache);
934 hlist_del_init_rcu(&b->hash);
935 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
936
937 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
938 b->parent = (void *) ~0UL;
939 b->flags = 0;
940 b->written = 0;
941 b->level = level;
942
943 if (!b->level)
944 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
945 &b->c->expensive_debug_checks);
946 else
947 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
948 &b->c->expensive_debug_checks);
949
950 return b;
951err:
952 if (b)
953 rw_unlock(true, b);
954
955 b = mca_cannibalize(c, op, k);
956 if (!IS_ERR(b))
957 goto out;
958
959 return b;
960}
961
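/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading
 * it in from disk if necessary.
 *
 * Returns -EAGAIN instead of doing IO when called from a bio submission
 * context (current->bio_list is set). On success the node is returned with
 * a read or write lock held, per @write.
 */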
971struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
972 struct bkey *k, int level, bool write,
973 struct btree *parent)
974{
975 int i = 0;
976 struct btree *b;
977
978 BUG_ON(level < 0);
979retry:
980 b = mca_find(c, k);
981
982 if (!b) {
983 if (current->bio_list)
984 return ERR_PTR(-EAGAIN);
985
986 mutex_lock(&c->bucket_lock);
987 b = mca_alloc(c, op, k, level);
988 mutex_unlock(&c->bucket_lock);
989
990 if (!b)
991 goto retry;
992 if (IS_ERR(b))
993 return b;
994
995 bch_btree_node_read(b);
996
997 if (!write)
998 downgrade_write(&b->lock);
999 } else {
1000 rw_lock(write, b, level);
1001 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1002 rw_unlock(write, b);
1003 goto retry;
1004 }
1005 BUG_ON(b->level != level);
1006 }
1007
1008 b->parent = parent;
1009 b->accessed = 1;
1010
1011 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1012 prefetch(b->keys.set[i].tree);
1013 prefetch(b->keys.set[i].data);
1014 }
1015
1016 for (; i <= b->keys.nsets; i++)
1017 prefetch(b->keys.set[i].data);
1018
1019 if (btree_node_io_error(b)) {
1020 rw_unlock(write, b);
1021 return ERR_PTR(-EIO);
1022 }
1023
1024 BUG_ON(!b->written);
1025
1026 return b;
1027}
1028
1029static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1030{
1031 struct btree *b;
1032
1033 mutex_lock(&parent->c->bucket_lock);
1034 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1035 mutex_unlock(&parent->c->bucket_lock);
1036
1037 if (!IS_ERR_OR_NULL(b)) {
1038 b->parent = parent;
1039 bch_btree_node_read(b);
1040 rw_unlock(true, b);
1041 }
1042}
1043
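/* Free the bucket a btree node occupies and put the node back on the freeable list */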
1046static void btree_node_free(struct btree *b)
1047{
1048 trace_bcache_btree_node_free(b);
1049
1050 BUG_ON(b == b->c->root);
1051
1052 mutex_lock(&b->write_lock);
1053
1054 if (btree_node_dirty(b))
1055 btree_complete_write(b, btree_current_write(b));
1056 clear_bit(BTREE_NODE_dirty, &b->flags);
1057
1058 mutex_unlock(&b->write_lock);
1059
1060 cancel_delayed_work(&b->work);
1061
1062 mutex_lock(&b->c->bucket_lock);
1063 bch_bucket_free(b->c, &b->key);
1064 mca_bucket_free(b);
1065 mutex_unlock(&b->c->bucket_lock);
1066}
1067
1068struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1069 int level, bool wait,
1070 struct btree *parent)
1071{
1072 BKEY_PADDED(key) k;
1073 struct btree *b = ERR_PTR(-EAGAIN);
1074
1075 mutex_lock(&c->bucket_lock);
1076retry:
1077 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1078 goto err;
1079
1080 bkey_put(c, &k.key);
1081 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1082
1083 b = mca_alloc(c, op, &k.key, level);
1084 if (IS_ERR(b))
1085 goto err_free;
1086
1087 if (!b) {
1088 cache_bug(c,
1089 "Tried to allocate bucket that was in btree cache");
1090 goto retry;
1091 }
1092
1093 b->accessed = 1;
1094 b->parent = parent;
1095 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1096
1097 mutex_unlock(&c->bucket_lock);
1098
1099 trace_bcache_btree_node_alloc(b);
1100 return b;
1101err_free:
1102 bch_bucket_free(c, &k.key);
1103err:
1104 mutex_unlock(&c->bucket_lock);
1105
1106 trace_bcache_btree_node_alloc_fail(c);
1107 return b;
1108}
1109
1110static struct btree *bch_btree_node_alloc(struct cache_set *c,
1111 struct btree_op *op, int level,
1112 struct btree *parent)
1113{
1114 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1115}
1116
1117static struct btree *btree_node_alloc_replacement(struct btree *b,
1118 struct btree_op *op)
1119{
1120 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1121 if (!IS_ERR_OR_NULL(n)) {
1122 mutex_lock(&n->write_lock);
1123 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1124 bkey_copy_key(&n->key, &b->key);
1125 mutex_unlock(&n->write_lock);
1126 }
1127
1128 return n;
1129}
1130
1131static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1132{
1133 unsigned i;
1134
1135 mutex_lock(&b->c->bucket_lock);
1136
1137 atomic_inc(&b->c->prio_blocked);
1138
1139 bkey_copy(k, &b->key);
1140 bkey_copy_key(k, &ZERO_KEY);
1141
1142 for (i = 0; i < KEY_PTRS(k); i++)
1143 SET_PTR_GEN(k, i,
1144 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1145 PTR_BUCKET(b->c, &b->key, i)));
1146
1147 mutex_unlock(&b->c->bucket_lock);
1148}
1149
1150static int btree_check_reserve(struct btree *b, struct btree_op *op)
1151{
1152 struct cache_set *c = b->c;
1153 struct cache *ca;
1154 unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1155
1156 mutex_lock(&c->bucket_lock);
1157
1158 for_each_cache(ca, c, i)
1159 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1160 if (op)
1161 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1162 TASK_UNINTERRUPTIBLE);
1163 mutex_unlock(&c->bucket_lock);
1164 return -EINTR;
1165 }
1166
1167 mutex_unlock(&c->bucket_lock);
1168
1169 return mca_cannibalize_lock(b->c, op);
1170}
1171
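/*
 * Garbage collection: mark the buckets that @k points to, updating last_gc,
 * the GC mark and the sector counts; returns the maximum pointer staleness
 * seen.
 */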
1174static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1175 struct bkey *k)
1176{
1177 uint8_t stale = 0;
1178 unsigned i;
1179 struct bucket *g;
1180
1186 if (!bkey_cmp(k, &ZERO_KEY))
1187 return stale;
1188
1189 for (i = 0; i < KEY_PTRS(k); i++) {
1190 if (!ptr_available(c, k, i))
1191 continue;
1192
1193 g = PTR_BUCKET(c, k, i);
1194
1195 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1196 g->last_gc = PTR_GEN(k, i);
1197
1198 if (ptr_stale(c, k, i)) {
1199 stale = max(stale, ptr_stale(c, k, i));
1200 continue;
1201 }
1202
1203 cache_bug_on(GC_MARK(g) &&
1204 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1205 c, "inconsistent ptrs: mark = %llu, level = %i",
1206 GC_MARK(g), level);
1207
1208 if (level)
1209 SET_GC_MARK(g, GC_MARK_METADATA);
1210 else if (KEY_DIRTY(k))
1211 SET_GC_MARK(g, GC_MARK_DIRTY);
1212 else if (!GC_MARK(g))
1213 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1214
1215
1216 SET_GC_SECTORS_USED(g, min_t(unsigned,
1217 GC_SECTORS_USED(g) + KEY_SIZE(k),
1218 MAX_GC_SECTORS_USED));
1219
1220 BUG_ON(!GC_SECTORS_USED(g));
1221 }
1222
1223 return stale;
1224}
1225
1226#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1227
1228void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1229{
1230 unsigned i;
1231
1232 for (i = 0; i < KEY_PTRS(k); i++)
1233 if (ptr_available(c, k, i) &&
1234 !ptr_stale(c, k, i)) {
1235 struct bucket *b = PTR_BUCKET(c, k, i);
1236
1237 b->gen = PTR_GEN(k, i);
1238
1239 if (level && bkey_cmp(k, &ZERO_KEY))
1240 b->prio = BTREE_PRIO;
1241 else if (!level && b->prio == BTREE_PRIO)
1242 b->prio = INITIAL_PRIO;
1243 }
1244
1245 __bch_btree_mark_key(c, level, k);
1246}
1247
1248void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1249{
1250 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1251}
1252
1253static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1254{
1255 uint8_t stale = 0;
1256 unsigned keys = 0, good_keys = 0;
1257 struct bkey *k;
1258 struct btree_iter iter;
1259 struct bset_tree *t;
1260
1261 gc->nodes++;
1262
1263 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1264 stale = max(stale, btree_mark_key(b, k));
1265 keys++;
1266
1267 if (bch_ptr_bad(&b->keys, k))
1268 continue;
1269
1270 gc->key_bytes += bkey_u64s(k);
1271 gc->nkeys++;
1272 good_keys++;
1273
1274 gc->data += KEY_SIZE(k);
1275 }
1276
1277 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1278 btree_bug_on(t->size &&
1279 bset_written(&b->keys, t) &&
1280 bkey_cmp(&b->key, &t->end) < 0,
1281 b, "found short btree key in gc");
1282
1283 if (b->c->gc_always_rewrite)
1284 return true;
1285
1286 if (stale > 10)
1287 return true;
1288
1289 if ((keys - good_keys) * 2 > keys)
1290 return true;
1291
1292 return false;
1293}
1294
1295#define GC_MERGE_NODES 4U
1296
1297struct gc_merge_info {
1298 struct btree *b;
1299 unsigned keys;
1300};
1301
1302static int bch_btree_insert_node(struct btree *, struct btree_op *,
1303 struct keylist *, atomic_t *, struct bkey *);
1304
1305static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1306 struct gc_stat *gc, struct gc_merge_info *r)
1307{
1308 unsigned i, nodes = 0, keys = 0, blocks;
1309 struct btree *new_nodes[GC_MERGE_NODES];
1310 struct keylist keylist;
1311 struct closure cl;
1312 struct bkey *k;
1313
1314 bch_keylist_init(&keylist);
1315
1316 if (btree_check_reserve(b, NULL))
1317 return 0;
1318
1319 memset(new_nodes, 0, sizeof(new_nodes));
1320 closure_init_stack(&cl);
1321
1322 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1323 keys += r[nodes++].keys;
1324
1325 blocks = btree_default_blocks(b->c) * 2 / 3;
1326
1327 if (nodes < 2 ||
1328 __set_blocks(b->keys.set[0].data, keys,
1329 block_bytes(b->c)) > blocks * (nodes - 1))
1330 return 0;
1331
1332 for (i = 0; i < nodes; i++) {
1333 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1334 if (IS_ERR_OR_NULL(new_nodes[i]))
1335 goto out_nocoalesce;
1336 }
1337
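	/*
	 * Re-check the reserve now that the replacement nodes have been
	 * allocated, before committing to the coalesce.
	 */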
1344 if (btree_check_reserve(b, NULL))
1345 goto out_nocoalesce;
1346
1347 for (i = 0; i < nodes; i++)
1348 mutex_lock(&new_nodes[i]->write_lock);
1349
1350 for (i = nodes - 1; i > 0; --i) {
1351 struct bset *n1 = btree_bset_first(new_nodes[i]);
1352 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1353 struct bkey *k, *last = NULL;
1354
1355 keys = 0;
1356
1357 if (i > 1) {
1358 for (k = n2->start;
1359 k < bset_bkey_last(n2);
1360 k = bkey_next(k)) {
1361 if (__set_blocks(n1, n1->keys + keys +
1362 bkey_u64s(k),
1363 block_bytes(b->c)) > blocks)
1364 break;
1365
1366 last = k;
1367 keys += bkey_u64s(k);
1368 }
1369 } else {
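			/*
			 * Last iteration: new_nodes[0] is being emptied
			 * entirely (it's freed below), so all of n2's keys
			 * have to fit in n1.
			 */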
1378 if (__set_blocks(n1, n1->keys + n2->keys,
1379 block_bytes(b->c)) >
1380 btree_blocks(new_nodes[i]))
1381 goto out_nocoalesce;
1382
1383 keys = n2->keys;
1384
1385 last = &r->b->key;
1386 }
1387
1388 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1389 btree_blocks(new_nodes[i]));
1390
1391 if (last)
1392 bkey_copy_key(&new_nodes[i]->key, last);
1393
1394 memcpy(bset_bkey_last(n1),
1395 n2->start,
1396 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1397
1398 n1->keys += keys;
1399 r[i].keys = n1->keys;
1400
1401 memmove(n2->start,
1402 bset_bkey_idx(n2, keys),
1403 (void *) bset_bkey_last(n2) -
1404 (void *) bset_bkey_idx(n2, keys));
1405
1406 n2->keys -= keys;
1407
1408 if (__bch_keylist_realloc(&keylist,
1409 bkey_u64s(&new_nodes[i]->key)))
1410 goto out_nocoalesce;
1411
1412 bch_btree_node_write(new_nodes[i], &cl);
1413 bch_keylist_add(&keylist, &new_nodes[i]->key);
1414 }
1415
1416 for (i = 0; i < nodes; i++)
1417 mutex_unlock(&new_nodes[i]->write_lock);
1418
1419 closure_sync(&cl);
1420
1421
1422 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1423 btree_node_free(new_nodes[0]);
1424 rw_unlock(true, new_nodes[0]);
1425 new_nodes[0] = NULL;
1426
1427 for (i = 0; i < nodes; i++) {
1428 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1429 goto out_nocoalesce;
1430
1431 make_btree_freeing_key(r[i].b, keylist.top);
1432 bch_keylist_push(&keylist);
1433 }
1434
1435 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1436 BUG_ON(!bch_keylist_empty(&keylist));
1437
1438 for (i = 0; i < nodes; i++) {
1439 btree_node_free(r[i].b);
1440 rw_unlock(true, r[i].b);
1441
1442 r[i].b = new_nodes[i];
1443 }
1444
1445 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1446 r[nodes - 1].b = ERR_PTR(-EINTR);
1447
1448 trace_bcache_btree_gc_coalesce(nodes);
1449 gc->nodes--;
1450
1451 bch_keylist_free(&keylist);
1452
1453
1454 return -EINTR;
1455
1456out_nocoalesce:
1457 closure_sync(&cl);
1458 bch_keylist_free(&keylist);
1459
1460 while ((k = bch_keylist_pop(&keylist)))
1461 if (!bkey_cmp(k, &ZERO_KEY))
1462 atomic_dec(&b->c->prio_blocked);
1463
1464 for (i = 0; i < nodes; i++)
1465 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1466 btree_node_free(new_nodes[i]);
1467 rw_unlock(true, new_nodes[i]);
1468 }
1469 return 0;
1470}
1471
1472static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1473 struct btree *replace)
1474{
1475 struct keylist keys;
1476 struct btree *n;
1477
1478 if (btree_check_reserve(b, NULL))
1479 return 0;
1480
1481 n = btree_node_alloc_replacement(replace, NULL);
1482
1483
1484 if (btree_check_reserve(b, NULL)) {
1485 btree_node_free(n);
1486 rw_unlock(true, n);
1487 return 0;
1488 }
1489
1490 bch_btree_node_write_sync(n);
1491
1492 bch_keylist_init(&keys);
1493 bch_keylist_add(&keys, &n->key);
1494
1495 make_btree_freeing_key(replace, keys.top);
1496 bch_keylist_push(&keys);
1497
1498 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1499 BUG_ON(!bch_keylist_empty(&keys));
1500
1501 btree_node_free(replace);
1502 rw_unlock(true, n);
1503
1504
1505 return -EINTR;
1506}
1507
1508static unsigned btree_gc_count_keys(struct btree *b)
1509{
1510 struct bkey *k;
1511 struct btree_iter iter;
1512 unsigned ret = 0;
1513
1514 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1515 ret += bkey_u64s(k);
1516
1517 return ret;
1518}
1519
1520static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1521 struct closure *writes, struct gc_stat *gc)
1522{
1523 int ret = 0;
1524 bool should_rewrite;
1525 struct bkey *k;
1526 struct btree_iter iter;
1527 struct gc_merge_info r[GC_MERGE_NODES];
1528 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1529
1530 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1531
1532 for (i = r; i < r + ARRAY_SIZE(r); i++)
1533 i->b = ERR_PTR(-EINTR);
1534
1535 while (1) {
1536 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1537 if (k) {
1538 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1539 true, b);
1540 if (IS_ERR(r->b)) {
1541 ret = PTR_ERR(r->b);
1542 break;
1543 }
1544
1545 r->keys = btree_gc_count_keys(r->b);
1546
1547 ret = btree_gc_coalesce(b, op, gc, r);
1548 if (ret)
1549 break;
1550 }
1551
1552 if (!last->b)
1553 break;
1554
1555 if (!IS_ERR(last->b)) {
1556 should_rewrite = btree_gc_mark_node(last->b, gc);
1557 if (should_rewrite) {
1558 ret = btree_gc_rewrite_node(b, op, last->b);
1559 if (ret)
1560 break;
1561 }
1562
1563 if (last->b->level) {
1564 ret = btree_gc_recurse(last->b, op, writes, gc);
1565 if (ret)
1566 break;
1567 }
1568
1569 bkey_copy_key(&b->c->gc_done, &last->b->key);
1570
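			/*
			 * Write the node out if it's dirty before dropping
			 * it; the writes closure is synced before gc
			 * finishes.
			 */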
1575 mutex_lock(&last->b->write_lock);
1576 if (btree_node_dirty(last->b))
1577 bch_btree_node_write(last->b, writes);
1578 mutex_unlock(&last->b->write_lock);
1579 rw_unlock(true, last->b);
1580 }
1581
1582 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1583 r->b = NULL;
1584
1585 if (need_resched()) {
1586 ret = -EAGAIN;
1587 break;
1588 }
1589 }
1590
1591 for (i = r; i < r + ARRAY_SIZE(r); i++)
1592 if (!IS_ERR_OR_NULL(i->b)) {
1593 mutex_lock(&i->b->write_lock);
1594 if (btree_node_dirty(i->b))
1595 bch_btree_node_write(i->b, writes);
1596 mutex_unlock(&i->b->write_lock);
1597 rw_unlock(true, i->b);
1598 }
1599
1600 return ret;
1601}
1602
1603static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1604 struct closure *writes, struct gc_stat *gc)
1605{
1606 struct btree *n = NULL;
1607 int ret = 0;
1608 bool should_rewrite;
1609
1610 should_rewrite = btree_gc_mark_node(b, gc);
1611 if (should_rewrite) {
1612 n = btree_node_alloc_replacement(b, NULL);
1613
1614 if (!IS_ERR_OR_NULL(n)) {
1615 bch_btree_node_write_sync(n);
1616
1617 bch_btree_set_root(n);
1618 btree_node_free(b);
1619 rw_unlock(true, n);
1620
1621 return -EINTR;
1622 }
1623 }
1624
1625 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1626
1627 if (b->level) {
1628 ret = btree_gc_recurse(b, op, writes, gc);
1629 if (ret)
1630 return ret;
1631 }
1632
1633 bkey_copy_key(&b->c->gc_done, &b->key);
1634
1635 return ret;
1636}
1637
1638static void btree_gc_start(struct cache_set *c)
1639{
1640 struct cache *ca;
1641 struct bucket *b;
1642 unsigned i;
1643
1644 if (!c->gc_mark_valid)
1645 return;
1646
1647 mutex_lock(&c->bucket_lock);
1648
1649 c->gc_mark_valid = 0;
1650 c->gc_done = ZERO_KEY;
1651
1652 for_each_cache(ca, c, i)
1653 for_each_bucket(b, ca) {
1654 b->last_gc = b->gen;
1655 if (!atomic_read(&b->pin)) {
1656 SET_GC_MARK(b, 0);
1657 SET_GC_SECTORS_USED(b, 0);
1658 }
1659 }
1660
1661 mutex_unlock(&c->bucket_lock);
1662}
1663
1664static void bch_btree_gc_finish(struct cache_set *c)
1665{
1666 struct bucket *b;
1667 struct cache *ca;
1668 unsigned i;
1669
1670 mutex_lock(&c->bucket_lock);
1671
1672 set_gc_sectors(c);
1673 c->gc_mark_valid = 1;
1674 c->need_gc = 0;
1675
1676 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1677 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1678 GC_MARK_METADATA);
1679
1680
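	/*
	 * Mark the buckets that in-flight writeback keys point to as dirty so
	 * they aren't treated as reclaimable.
	 */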
1681 rcu_read_lock();
1682 for (i = 0; i < c->devices_max_used; i++) {
1683 struct bcache_device *d = c->devices[i];
1684 struct cached_dev *dc;
1685 struct keybuf_key *w, *n;
1686 unsigned j;
1687
1688 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1689 continue;
1690 dc = container_of(d, struct cached_dev, disk);
1691
1692 spin_lock(&dc->writeback_keys.lock);
1693 rbtree_postorder_for_each_entry_safe(w, n,
1694 &dc->writeback_keys.keys, node)
1695 for (j = 0; j < KEY_PTRS(&w->key); j++)
1696 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1697 GC_MARK_DIRTY);
1698 spin_unlock(&dc->writeback_keys.lock);
1699 }
1700 rcu_read_unlock();
1701
1702 c->avail_nbuckets = 0;
1703 for_each_cache(ca, c, i) {
1704 uint64_t *i;
1705
1706 ca->invalidate_needs_gc = 0;
1707
1708 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1709 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1710
1711 for (i = ca->prio_buckets;
1712 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1713 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1714
1715 for_each_bucket(b, ca) {
1716 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1717
1718 if (atomic_read(&b->pin))
1719 continue;
1720
1721 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1722
1723 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1724 c->avail_nbuckets++;
1725 }
1726 }
1727
1728 mutex_unlock(&c->bucket_lock);
1729}
1730
1731static void bch_btree_gc(struct cache_set *c)
1732{
1733 int ret;
1734 struct gc_stat stats;
1735 struct closure writes;
1736 struct btree_op op;
1737 uint64_t start_time = local_clock();
1738
1739 trace_bcache_gc_start(c);
1740
1741 memset(&stats, 0, sizeof(struct gc_stat));
1742 closure_init_stack(&writes);
1743 bch_btree_op_init(&op, SHRT_MAX);
1744
1745 btree_gc_start(c);
1746
1747 do {
1748 ret = btree_root(gc_root, c, &op, &writes, &stats);
1749 closure_sync(&writes);
1750 cond_resched();
1751
1752 if (ret && ret != -EAGAIN)
1753 pr_warn("gc failed!");
1754 } while (ret);
1755
1756 bch_btree_gc_finish(c);
1757 wake_up_allocators(c);
1758
1759 bch_time_stats_update(&c->btree_gc_time, start_time);
1760
1761 stats.key_bytes *= sizeof(uint64_t);
1762 stats.data <<= 9;
1763 bch_update_bucket_in_use(c, &stats);
1764 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1765
1766 trace_bcache_gc_end(c);
1767
1768 bch_moving_gc(c);
1769}
1770
1771static bool gc_should_run(struct cache_set *c)
1772{
1773 struct cache *ca;
1774 unsigned i;
1775
1776 for_each_cache(ca, c, i)
1777 if (ca->invalidate_needs_gc)
1778 return true;
1779
1780 if (atomic_read(&c->sectors_to_gc) < 0)
1781 return true;
1782
1783 return false;
1784}
1785
1786static int bch_gc_thread(void *arg)
1787{
1788 struct cache_set *c = arg;
1789
1790 while (1) {
1791 wait_event_interruptible(c->gc_wait,
1792 kthread_should_stop() || gc_should_run(c));
1793
1794 if (kthread_should_stop())
1795 break;
1796
1797 set_gc_sectors(c);
1798 bch_btree_gc(c);
1799 }
1800
1801 return 0;
1802}
1803
1804int bch_gc_thread_start(struct cache_set *c)
1805{
1806 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1807 return PTR_ERR_OR_ZERO(c->gc_thread);
1808}
1809
1810
1811
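/*
 * Initial mark pass: walk the whole btree marking every key so gc and
 * allocator state is valid before the cache set is used.
 */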
1812static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1813{
1814 int ret = 0;
1815 struct bkey *k, *p = NULL;
1816 struct btree_iter iter;
1817
1818 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1819 bch_initial_mark_key(b->c, b->level, k);
1820
1821 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1822
1823 if (b->level) {
1824 bch_btree_iter_init(&b->keys, &iter, NULL);
1825
1826 do {
1827 k = bch_btree_iter_next_filter(&iter, &b->keys,
1828 bch_ptr_bad);
1829 if (k)
1830 btree_node_prefetch(b, k);
1831
1832 if (p)
1833 ret = btree(check_recurse, p, b, op);
1834
1835 p = k;
1836 } while (p && !ret);
1837 }
1838
1839 return ret;
1840}
1841
1842int bch_btree_check(struct cache_set *c)
1843{
1844 struct btree_op op;
1845
1846 bch_btree_op_init(&op, SHRT_MAX);
1847
1848 return btree_root(check_recurse, c, &op);
1849}
1850
1851void bch_initial_gc_finish(struct cache_set *c)
1852{
1853 struct cache *ca;
1854 struct bucket *b;
1855 unsigned i;
1856
1857 bch_btree_gc_finish(c);
1858
1859 mutex_lock(&c->bucket_lock);
1860
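	/*
	 * Seed the prio and btree freelists with buckets we know to be clean,
	 * so the allocator thread can get started - it needs free buckets
	 * before it can write out new prios and gens.
	 */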
1870 for_each_cache(ca, c, i) {
1871 for_each_bucket(b, ca) {
1872 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1873 fifo_full(&ca->free[RESERVE_BTREE]))
1874 break;
1875
1876 if (bch_can_invalidate_bucket(ca, b) &&
1877 !GC_MARK(b)) {
1878 __bch_invalidate_one_bucket(ca, b);
1879 if (!fifo_push(&ca->free[RESERVE_PRIO],
1880 b - ca->buckets))
1881 fifo_push(&ca->free[RESERVE_BTREE],
1882 b - ca->buckets);
1883 }
1884 }
1885 }
1886
1887 mutex_unlock(&c->bucket_lock);
1888}
1889
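/*
 * Btree insertion: insert @k into the node's in-memory keys; returns false
 * if the insert was a no-op (e.g. a replace whose old key didn't match).
 */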
1892static bool btree_insert_key(struct btree *b, struct bkey *k,
1893 struct bkey *replace_key)
1894{
1895 unsigned status;
1896
1897 BUG_ON(bkey_cmp(k, &b->key) > 0);
1898
1899 status = bch_btree_insert_key(&b->keys, k, replace_key);
1900 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1901 bch_check_keys(&b->keys, "%u for %s", status,
1902 replace_key ? "replace" : "insert");
1903
1904 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1905 status);
1906 return true;
1907 } else
1908 return false;
1909}
1910
1911static size_t insert_u64s_remaining(struct btree *b)
1912{
1913 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1914
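	/*
	 * An extent insert may have to split an existing key, so leave room
	 * for one more worst case sized key.
	 */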
1918 if (b->keys.ops->is_extents)
1919 ret -= KEY_MAX_U64S;
1920
1921 return max(ret, 0L);
1922}
1923
1924static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1925 struct keylist *insert_keys,
1926 struct bkey *replace_key)
1927{
1928 bool ret = false;
1929 int oldsize = bch_count_data(&b->keys);
1930
1931 while (!bch_keylist_empty(insert_keys)) {
1932 struct bkey *k = insert_keys->keys;
1933
1934 if (bkey_u64s(k) > insert_u64s_remaining(b))
1935 break;
1936
1937 if (bkey_cmp(k, &b->key) <= 0) {
1938 if (!b->level)
1939 bkey_put(b->c, k);
1940
1941 ret |= btree_insert_key(b, k, replace_key);
1942 bch_keylist_pop_front(insert_keys);
1943 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1944 BKEY_PADDED(key) temp;
1945 bkey_copy(&temp.key, insert_keys->keys);
1946
1947 bch_cut_back(&b->key, &temp.key);
1948 bch_cut_front(&b->key, insert_keys->keys);
1949
1950 ret |= btree_insert_key(b, &temp.key, replace_key);
1951 break;
1952 } else {
1953 break;
1954 }
1955 }
1956
1957 if (!ret)
1958 op->insert_collision = true;
1959
1960 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1961
1962 BUG_ON(bch_count_data(&b->keys) < oldsize);
1963 return ret;
1964}
1965
1966static int btree_split(struct btree *b, struct btree_op *op,
1967 struct keylist *insert_keys,
1968 struct bkey *replace_key)
1969{
1970 bool split;
1971 struct btree *n1, *n2 = NULL, *n3 = NULL;
1972 uint64_t start_time = local_clock();
1973 struct closure cl;
1974 struct keylist parent_keys;
1975
1976 closure_init_stack(&cl);
1977 bch_keylist_init(&parent_keys);
1978
1979 if (btree_check_reserve(b, op)) {
1980 if (!b->level)
1981 return -EINTR;
1982 else
1983 WARN(1, "insufficient reserve for split\n");
1984 }
1985
1986 n1 = btree_node_alloc_replacement(b, op);
1987 if (IS_ERR(n1))
1988 goto err;
1989
1990 split = set_blocks(btree_bset_first(n1),
1991 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1992
1993 if (split) {
1994 unsigned keys = 0;
1995
1996 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1997
1998 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1999 if (IS_ERR(n2))
2000 goto err_free1;
2001
2002 if (!b->parent) {
2003 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2004 if (IS_ERR(n3))
2005 goto err_free2;
2006 }
2007
2008 mutex_lock(&n1->write_lock);
2009 mutex_lock(&n2->write_lock);
2010
2011 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2012
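		/*
		 * Pick the split point by walking keys linearly until n1
		 * holds roughly 3/5 of them.
		 */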
2018 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2019 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2020 keys));
2021
2022 bkey_copy_key(&n1->key,
2023 bset_bkey_idx(btree_bset_first(n1), keys));
2024 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2025
2026 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2027 btree_bset_first(n1)->keys = keys;
2028
2029 memcpy(btree_bset_first(n2)->start,
2030 bset_bkey_last(btree_bset_first(n1)),
2031 btree_bset_first(n2)->keys * sizeof(uint64_t));
2032
2033 bkey_copy_key(&n2->key, &b->key);
2034
2035 bch_keylist_add(&parent_keys, &n2->key);
2036 bch_btree_node_write(n2, &cl);
2037 mutex_unlock(&n2->write_lock);
2038 rw_unlock(true, n2);
2039 } else {
2040 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2041
2042 mutex_lock(&n1->write_lock);
2043 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2044 }
2045
2046 bch_keylist_add(&parent_keys, &n1->key);
2047 bch_btree_node_write(n1, &cl);
2048 mutex_unlock(&n1->write_lock);
2049
2050 if (n3) {
2051
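		/* The split added a level: n3 becomes the new root */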
2052 mutex_lock(&n3->write_lock);
2053 bkey_copy_key(&n3->key, &MAX_KEY);
2054 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2055 bch_btree_node_write(n3, &cl);
2056 mutex_unlock(&n3->write_lock);
2057
2058 closure_sync(&cl);
2059 bch_btree_set_root(n3);
2060 rw_unlock(true, n3);
2061 } else if (!b->parent) {
2062
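		/* Root node that didn't need splitting: n1 replaces it */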
2063 closure_sync(&cl);
2064 bch_btree_set_root(n1);
2065 } else {
2066
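		/* Split of a non-root node: update the parent */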
2067 closure_sync(&cl);
2068 make_btree_freeing_key(b, parent_keys.top);
2069 bch_keylist_push(&parent_keys);
2070
2071 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2072 BUG_ON(!bch_keylist_empty(&parent_keys));
2073 }
2074
2075 btree_node_free(b);
2076 rw_unlock(true, n1);
2077
2078 bch_time_stats_update(&b->c->btree_split_time, start_time);
2079
2080 return 0;
2081err_free2:
2082 bkey_put(b->c, &n2->key);
2083 btree_node_free(n2);
2084 rw_unlock(true, n2);
2085err_free1:
2086 bkey_put(b->c, &n1->key);
2087 btree_node_free(n1);
2088 rw_unlock(true, n1);
2089err:
2090 WARN(1, "bcache: btree split failed (level %u)", b->level);
2091
2092 if (n3 == ERR_PTR(-EAGAIN) ||
2093 n2 == ERR_PTR(-EAGAIN) ||
2094 n1 == ERR_PTR(-EAGAIN))
2095 return -EAGAIN;
2096
2097 return -ENOMEM;
2098}
2099
2100static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2101 struct keylist *insert_keys,
2102 atomic_t *journal_ref,
2103 struct bkey *replace_key)
2104{
2105 struct closure cl;
2106
2107 BUG_ON(b->level && replace_key);
2108
2109 closure_init_stack(&cl);
2110
2111 mutex_lock(&b->write_lock);
2112
2113 if (write_block(b) != btree_bset_last(b) &&
2114 b->keys.last_set_unwritten)
2115 bch_btree_init_next(b);
2116
2117 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2118 mutex_unlock(&b->write_lock);
2119 goto split;
2120 }
2121
2122 BUG_ON(write_block(b) != btree_bset_last(b));
2123
2124 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2125 if (!b->level)
2126 bch_btree_leaf_dirty(b, journal_ref);
2127 else
2128 bch_btree_node_write(b, &cl);
2129 }
2130
2131 mutex_unlock(&b->write_lock);
2132
2133
2134 closure_sync(&cl);
2135
2136 return 0;
2137split:
2138 if (current->bio_list) {
2139 op->lock = b->c->root->level + 1;
2140 return -EAGAIN;
2141 } else if (op->lock <= b->c->root->level) {
2142 op->lock = b->c->root->level + 1;
2143 return -EINTR;
2144 } else {
2145
2146 int ret = btree_split(b, op, insert_keys, replace_key);
2147
2148 if (bch_keylist_empty(insert_keys))
2149 return 0;
2150 else if (!ret)
2151 return -EINTR;
2152 return ret;
2153 }
2154}
2155
2156int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2157 struct bkey *check_key)
2158{
2159 int ret = -EINTR;
2160 uint64_t btree_ptr = b->key.ptr[0];
2161 unsigned long seq = b->seq;
2162 struct keylist insert;
2163 bool upgrade = op->lock == -1;
2164
2165 bch_keylist_init(&insert);
2166
2167 if (upgrade) {
2168 rw_unlock(false, b);
2169 rw_lock(true, b, b->level);
2170
2171 if (b->key.ptr[0] != btree_ptr ||
2172 b->seq != seq + 1) {
2173 op->lock = b->level;
2174 goto out;
2175 }
2176 }
2177
2178 SET_KEY_PTRS(check_key, 1);
2179 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2180
2181 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2182
2183 bch_keylist_add(&insert, check_key);
2184
2185 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2186
2187 BUG_ON(!ret && !bch_keylist_empty(&insert));
2188out:
2189 if (upgrade)
2190 downgrade_write(&b->lock);
2191 return ret;
2192}
2193
2194struct btree_insert_op {
2195 struct btree_op op;
2196 struct keylist *keys;
2197 atomic_t *journal_ref;
2198 struct bkey *replace_key;
2199};
2200
2201static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2202{
2203 struct btree_insert_op *op = container_of(b_op,
2204 struct btree_insert_op, op);
2205
2206 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2207 op->journal_ref, op->replace_key);
2208 if (ret && !bch_keylist_empty(op->keys))
2209 return ret;
2210 else
2211 return MAP_DONE;
2212}
2213
2214int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2215 atomic_t *journal_ref, struct bkey *replace_key)
2216{
2217 struct btree_insert_op op;
2218 int ret = 0;
2219
2220 BUG_ON(current->bio_list);
2221 BUG_ON(bch_keylist_empty(keys));
2222
2223 bch_btree_op_init(&op.op, 0);
2224 op.keys = keys;
2225 op.journal_ref = journal_ref;
2226 op.replace_key = replace_key;
2227
2228 while (!ret && !bch_keylist_empty(keys)) {
2229 op.op.lock = 0;
2230 ret = bch_btree_map_leaf_nodes(&op.op, c,
2231 &START_KEY(keys->keys),
2232 btree_insert_fn);
2233 }
2234
2235 if (ret) {
2236 struct bkey *k;
2237
2238 pr_err("error %i", ret);
2239
2240 while ((k = bch_keylist_pop(keys)))
2241 bkey_put(c, k);
2242 } else if (op.op.insert_collision)
2243 ret = -ESRCH;
2244
2245 return ret;
2246}
2247
2248void bch_btree_set_root(struct btree *b)
2249{
2250 unsigned i;
2251 struct closure cl;
2252
2253 closure_init_stack(&cl);
2254
2255 trace_bcache_btree_set_root(b);
2256
2257 BUG_ON(!b->written);
2258
2259 for (i = 0; i < KEY_PTRS(&b->key); i++)
2260 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2261
2262 mutex_lock(&b->c->bucket_lock);
2263 list_del_init(&b->list);
2264 mutex_unlock(&b->c->bucket_lock);
2265
2266 b->c->root = b;
2267
2268 bch_journal_meta(b->c, &cl);
2269 closure_sync(&cl);
2270}
2271
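/* Map across nodes or keys: walk the btree calling @fn on each node or key */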
2274static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2275 struct bkey *from,
2276 btree_map_nodes_fn *fn, int flags)
2277{
2278 int ret = MAP_CONTINUE;
2279
2280 if (b->level) {
2281 struct bkey *k;
2282 struct btree_iter iter;
2283
2284 bch_btree_iter_init(&b->keys, &iter, from);
2285
2286 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2287 bch_ptr_bad))) {
2288 ret = btree(map_nodes_recurse, k, b,
2289 op, from, fn, flags);
2290 from = NULL;
2291
2292 if (ret != MAP_CONTINUE)
2293 return ret;
2294 }
2295 }
2296
2297 if (!b->level || flags == MAP_ALL_NODES)
2298 ret = fn(op, b);
2299
2300 return ret;
2301}
2302
2303int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2304 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2305{
2306 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2307}
2308
2309static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2310 struct bkey *from, btree_map_keys_fn *fn,
2311 int flags)
2312{
2313 int ret = MAP_CONTINUE;
2314 struct bkey *k;
2315 struct btree_iter iter;
2316
2317 bch_btree_iter_init(&b->keys, &iter, from);
2318
2319 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2320 ret = !b->level
2321 ? fn(op, b, k)
2322 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2323 from = NULL;
2324
2325 if (ret != MAP_CONTINUE)
2326 return ret;
2327 }
2328
2329 if (!b->level && (flags & MAP_END_KEY))
2330 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2331 KEY_OFFSET(&b->key), 0));
2332
2333 return ret;
2334}
2335
2336int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2337 struct bkey *from, btree_map_keys_fn *fn, int flags)
2338{
2339 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2340}
2341
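/* Keybuf: an rbtree of keys matching a predicate, refilled by scanning the btree */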
2344static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2345{
2346
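	/* Overlapping keys compare equal */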
2347 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2348 return -1;
2349 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2350 return 1;
2351 return 0;
2352}
2353
2354static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2355 struct keybuf_key *r)
2356{
2357 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2358}
2359
2360struct refill {
2361 struct btree_op op;
2362 unsigned nr_found;
2363 struct keybuf *buf;
2364 struct bkey *end;
2365 keybuf_pred_fn *pred;
2366};
2367
2368static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2369 struct bkey *k)
2370{
2371 struct refill *refill = container_of(op, struct refill, op);
2372 struct keybuf *buf = refill->buf;
2373 int ret = MAP_CONTINUE;
2374
2375 if (bkey_cmp(k, refill->end) >= 0) {
2376 ret = MAP_DONE;
2377 goto out;
2378 }
2379
2380 if (!KEY_SIZE(k))
2381 goto out;
2382
2383 if (refill->pred(buf, k)) {
2384 struct keybuf_key *w;
2385
2386 spin_lock(&buf->lock);
2387
2388 w = array_alloc(&buf->freelist);
2389 if (!w) {
2390 spin_unlock(&buf->lock);
2391 return MAP_DONE;
2392 }
2393
2394 w->private = NULL;
2395 bkey_copy(&w->key, k);
2396
2397 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2398 array_free(&buf->freelist, w);
2399 else
2400 refill->nr_found++;
2401
2402 if (array_freelist_empty(&buf->freelist))
2403 ret = MAP_DONE;
2404
2405 spin_unlock(&buf->lock);
2406 }
2407out:
2408 buf->last_scanned = *k;
2409 return ret;
2410}
2411
2412void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2413 struct bkey *end, keybuf_pred_fn *pred)
2414{
2415 struct bkey start = buf->last_scanned;
2416 struct refill refill;
2417
2418 cond_resched();
2419
2420 bch_btree_op_init(&refill.op, -1);
2421 refill.nr_found = 0;
2422 refill.buf = buf;
2423 refill.end = end;
2424 refill.pred = pred;
2425
2426 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2427 refill_keybuf_fn, MAP_END_KEY);
2428
2429 trace_bcache_keyscan(refill.nr_found,
2430 KEY_INODE(&start), KEY_OFFSET(&start),
2431 KEY_INODE(&buf->last_scanned),
2432 KEY_OFFSET(&buf->last_scanned));
2433
2434 spin_lock(&buf->lock);
2435
2436 if (!RB_EMPTY_ROOT(&buf->keys)) {
2437 struct keybuf_key *w;
2438 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2439 buf->start = START_KEY(&w->key);
2440
2441 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2442 buf->end = w->key;
2443 } else {
2444 buf->start = MAX_KEY;
2445 buf->end = MAX_KEY;
2446 }
2447
2448 spin_unlock(&buf->lock);
2449}
2450
2451static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2452{
2453 rb_erase(&w->node, &buf->keys);
2454 array_free(&buf->freelist, w);
2455}
2456
2457void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2458{
2459 spin_lock(&buf->lock);
2460 __bch_keybuf_del(buf, w);
2461 spin_unlock(&buf->lock);
2462}
2463
2464bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2465 struct bkey *end)
2466{
2467 bool ret = false;
2468 struct keybuf_key *p, *w, s;
2469 s.key = *start;
2470
2471 if (bkey_cmp(end, &buf->start) <= 0 ||
2472 bkey_cmp(start, &buf->end) >= 0)
2473 return false;
2474
2475 spin_lock(&buf->lock);
2476 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2477
2478 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2479 p = w;
2480 w = RB_NEXT(w, node);
2481
2482 if (p->private)
2483 ret = true;
2484 else
2485 __bch_keybuf_del(buf, p);
2486 }
2487
2488 spin_unlock(&buf->lock);
2489 return ret;
2490}
2491
2492struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2493{
2494 struct keybuf_key *w;
2495 spin_lock(&buf->lock);
2496
2497 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2498
2499 while (w && w->private)
2500 w = RB_NEXT(w, node);
2501
2502 if (w)
2503 w->private = ERR_PTR(-EINTR);
2504
2505 spin_unlock(&buf->lock);
2506 return w;
2507}
2508
2509struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2510 struct keybuf *buf,
2511 struct bkey *end,
2512 keybuf_pred_fn *pred)
2513{
2514 struct keybuf_key *ret;
2515
2516 while (1) {
2517 ret = bch_keybuf_next(buf);
2518 if (ret)
2519 break;
2520
2521 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2522 pr_debug("scan finished");
2523 break;
2524 }
2525
2526 bch_refill_keybuf(c, buf, end, pred);
2527 }
2528
2529 return ret;
2530}
2531
2532void bch_keybuf_init(struct keybuf *buf)
2533{
2534 buf->last_scanned = MAX_KEY;
2535 buf->keys = RB_ROOT;
2536
2537 spin_lock_init(&buf->lock);
2538 array_allocator_init(&buf->freelist);
2539}
2540