24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
38
39#include <trace/events/bcache.h>
40
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93
94#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
95
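/*
 * PTR_HASH() folds a btree node's first pointer (bucket offset plus gen) into
 * a single value used to look the node up in the in-memory cache hash table.
 * insert_lock() is true when a node at this level must be write locked for
 * the operation described by @s.
 */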
96#define PTR_HASH(c, k) \
97 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
98
99#define insert_lock(s, b) ((b)->level <= (s)->lock)
100
/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you.
 *
 * op->lock determines the deepest level at which nodes are write locked:
 * nodes at a level <= op->lock are locked for writing, the rest for reading
 * (see insert_lock()). If a traversal discovers it needs a write lock it
 * doesn't have, it sets op->lock and returns -EINTR, and btree_root() retries
 * from the top with the new locking.
 */

/*
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
119#define btree(fn, key, b, op, ...) \
120({ \
121 int _r, l = (b)->level - 1; \
122 bool _w = l <= (op)->lock; \
123 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
124 _w, b); \
125 if (!IS_ERR(_child)) { \
126 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
127 rw_unlock(_w, _child); \
128 } else \
129 _r = PTR_ERR(_child); \
130 _r; \
131})
132
/*
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the root node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
139#define btree_root(fn, c, op, ...) \
140({ \
141 int _r = -EINTR; \
142 do { \
143 struct btree *_b = (c)->root; \
144 bool _w = insert_lock(op, _b); \
145 rw_lock(_w, _b, _b->level); \
146 if (_b == (c)->root && \
147 _w == insert_lock(op, _b)) { \
148 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
149 } \
150 rw_unlock(_w, _b); \
151 bch_cannibalize_unlock(c); \
152 if (_r == -EINTR) \
153 schedule(); \
154 } while (_r == -EINTR); \
155 \
156 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
157 _r; \
158})
159
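/*
 * write_block() returns the address within the node's buffer where the next
 * bset (i.e. the next write to this node) will start.
 */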
160static inline struct bset *write_block(struct btree *b)
161{
162 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
163}
164
165static void bch_btree_init_next(struct btree *b)
166{
	/* If not a leaf node, always sort */
168 if (b->level && b->keys.nsets)
169 bch_btree_sort(&b->keys, &b->c->sort);
170 else
171 bch_btree_sort_lazy(&b->keys, &b->c->sort);
172
173 if (b->written < btree_blocks(b))
174 bch_bset_init_next(&b->keys, write_block(b),
175 bset_magic(&b->c->sb));
176
177}

/* Btree key manipulation */

181void bkey_put(struct cache_set *c, struct bkey *k)
182{
183 unsigned i;
184
185 for (i = 0; i < KEY_PTRS(k); i++)
186 if (ptr_available(c, k, i))
187 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
188}

/* Btree IO */

192static uint64_t btree_csum_set(struct btree *b, struct bset *i)
193{
194 uint64_t crc = b->key.ptr[0];
195 void *data = (void *) i + 8, *end = bset_bkey_last(i);
196
197 crc = bch_crc64_update(crc, data, end - data);
198 return crc ^ 0xffffffffffffffffULL;
199}
200
201void bch_btree_node_read_done(struct btree *b)
202{
203 const char *err = "bad btree header";
204 struct bset *i = btree_bset_first(b);
205 struct btree_iter *iter;
206
207 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
208 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
209 iter->used = 0;
210
211#ifdef CONFIG_BCACHE_DEBUG
212 iter->b = &b->keys;
213#endif
214
215 if (!i->seq)
216 goto err;
217
218 for (;
219 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
220 i = write_block(b)) {
221 err = "unsupported bset version";
222 if (i->version > BCACHE_BSET_VERSION)
223 goto err;
224
225 err = "bad btree header";
226 if (b->written + set_blocks(i, block_bytes(b->c)) >
227 btree_blocks(b))
228 goto err;
229
230 err = "bad magic";
231 if (i->magic != bset_magic(&b->c->sb))
232 goto err;
233
234 err = "bad checksum";
235 switch (i->version) {
236 case 0:
237 if (i->csum != csum_set(i))
238 goto err;
239 break;
240 case BCACHE_BSET_VERSION:
241 if (i->csum != btree_csum_set(b, i))
242 goto err;
243 break;
244 }
245
246 err = "empty set";
247 if (i != b->keys.set[0].data && !i->keys)
248 goto err;
249
250 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
251
252 b->written += set_blocks(i, block_bytes(b->c));
253 }
254
255 err = "corrupted btree";
256 for (i = write_block(b);
257 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
258 i = ((void *) i) + block_bytes(b->c))
259 if (i->seq == b->keys.set[0].data->seq)
260 goto err;
261
262 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
263
264 i = b->keys.set[0].data;
265 err = "short btree key";
266 if (b->keys.set[0].size &&
267 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
268 goto err;
269
270 if (b->written < btree_blocks(b))
271 bch_bset_init_next(&b->keys, write_block(b),
272 bset_magic(&b->c->sb));
273out:
274 mempool_free(iter, &b->c->fill_iter);
275 return;
276err:
277 set_btree_node_io_error(b);
278 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
279 err, PTR_BUCKET_NR(b->c, &b->key, 0),
280 bset_block_offset(b, i), i->keys);
281 goto out;
282}
283
284static void btree_node_read_endio(struct bio *bio)
285{
286 struct closure *cl = bio->bi_private;
287 closure_put(cl);
288}
289
290static void bch_btree_node_read(struct btree *b)
291{
292 uint64_t start_time = local_clock();
293 struct closure cl;
294 struct bio *bio;
295
296 trace_bcache_btree_read(b);
297
298 closure_init_stack(&cl);
299
300 bio = bch_bbio_alloc(b->c);
301 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
302 bio->bi_end_io = btree_node_read_endio;
303 bio->bi_private = &cl;
304 bio->bi_opf = REQ_OP_READ | REQ_META;
305
306 bch_bio_map(bio, b->keys.set[0].data);
307
308 bch_submit_bbio(bio, b->c, &b->key, 0);
309 closure_sync(&cl);
310
311 if (bio->bi_status)
312 set_btree_node_io_error(b);
313
314 bch_bbio_free(bio, b->c);
315
316 if (btree_node_io_error(b))
317 goto err;
318
319 bch_btree_node_read_done(b);
320 bch_time_stats_update(&b->c->btree_read_time, start_time);
321
322 return;
323err:
324 bch_cache_set_error(b->c, "io error reading bucket %zu",
325 PTR_BUCKET_NR(b->c, &b->key, 0));
326}
327
328static void btree_complete_write(struct btree *b, struct btree_write *w)
329{
330 if (w->prio_blocked &&
331 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
332 wake_up_allocators(b->c);
333
334 if (w->journal) {
335 atomic_dec_bug(w->journal);
336 __closure_wake_up(&b->c->journal.wait);
337 }
338
339 w->prio_blocked = 0;
340 w->journal = NULL;
341}
342
343static void btree_node_write_unlock(struct closure *cl)
344{
345 struct btree *b = container_of(cl, struct btree, io);
346
347 up(&b->io_mutex);
348}
349
350static void __btree_node_write_done(struct closure *cl)
351{
352 struct btree *b = container_of(cl, struct btree, io);
353 struct btree_write *w = btree_prev_write(b);
354
355 bch_bbio_free(b->bio, b->c);
356 b->bio = NULL;
357 btree_complete_write(b, w);
358
359 if (btree_node_dirty(b))
360 schedule_delayed_work(&b->work, 30 * HZ);
361
362 closure_return_with_destructor(cl, btree_node_write_unlock);
363}
364
365static void btree_node_write_done(struct closure *cl)
366{
367 struct btree *b = container_of(cl, struct btree, io);
368
369 bio_free_pages(b->bio);
370 __btree_node_write_done(cl);
371}
372
373static void btree_node_write_endio(struct bio *bio)
374{
375 struct closure *cl = bio->bi_private;
376 struct btree *b = container_of(cl, struct btree, io);
377
378 if (bio->bi_status)
379 set_btree_node_io_error(b);
380
381 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
382 closure_put(cl);
383}
384
385static void do_btree_node_write(struct btree *b)
386{
387 struct closure *cl = &b->io;
388 struct bset *i = btree_bset_last(b);
389 BKEY_PADDED(key) k;
390
391 i->version = BCACHE_BSET_VERSION;
392 i->csum = btree_csum_set(b, i);
393
394 BUG_ON(b->bio);
395 b->bio = bch_bbio_alloc(b->c);
396
397 b->bio->bi_end_io = btree_node_write_endio;
398 b->bio->bi_private = cl;
399 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
400 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
401 bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

418 bkey_copy(&k.key, &b->key);
419 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420 bset_sector_offset(&b->keys, i));
421
422 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
423 int j;
424 struct bio_vec *bv;
425 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
426
427 bio_for_each_segment_all(bv, b->bio, j)
428 memcpy(page_address(bv->bv_page),
429 base + j * PAGE_SIZE, PAGE_SIZE);
430
431 bch_submit_bbio(b->bio, b->c, &k.key, 0);
432
433 continue_at(cl, btree_node_write_done, NULL);
434 } else {
435
436 b->bio->bi_vcnt = 0;
437 bch_bio_map(b->bio, i);
438
439 bch_submit_bbio(b->bio, b->c, &k.key, 0);
440
441 closure_sync(cl);
442 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
443 }
444}
445
446void __bch_btree_node_write(struct btree *b, struct closure *parent)
447{
448 struct bset *i = btree_bset_last(b);
449
450 lockdep_assert_held(&b->write_lock);
451
452 trace_bcache_btree_write(b);
453
454 BUG_ON(current->bio_list);
455 BUG_ON(b->written >= btree_blocks(b));
456 BUG_ON(b->written && !i->keys);
457 BUG_ON(btree_bset_first(b)->seq != i->seq);
458 bch_check_keys(&b->keys, "writing");
459
460 cancel_delayed_work(&b->work);
461
	/* If caller isn't waiting for write, parent refcount is cache set */
463 down(&b->io_mutex);
464 closure_init(&b->io, parent ?: &b->c->cl);
465
466 clear_bit(BTREE_NODE_dirty, &b->flags);
467 change_bit(BTREE_NODE_write_idx, &b->flags);
468
469 do_btree_node_write(b);
470
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
473
474 b->written += set_blocks(i, block_bytes(b->c));
475}
476
477void bch_btree_node_write(struct btree *b, struct closure *parent)
478{
479 unsigned nsets = b->keys.nsets;
480
481 lockdep_assert_held(&b->lock);
482
483 __bch_btree_node_write(b, parent);
484
	/*
	 * do verify if there was more than one set initially (i.e. we're
	 * sorting) and we sorted down to a single set:
	 */
489 if (nsets && !b->keys.nsets)
490 bch_btree_verify(b);
491
492 bch_btree_init_next(b);
493}
494
495static void bch_btree_node_write_sync(struct btree *b)
496{
497 struct closure cl;
498
499 closure_init_stack(&cl);
500
501 mutex_lock(&b->write_lock);
502 bch_btree_node_write(b, &cl);
503 mutex_unlock(&b->write_lock);
504
505 closure_sync(&cl);
506}
507
508static void btree_node_write_work(struct work_struct *w)
509{
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
512 mutex_lock(&b->write_lock);
513 if (btree_node_dirty(b))
514 __bch_btree_node_write(b, NULL);
515 mutex_unlock(&b->write_lock);
516}
517
518static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
519{
520 struct bset *i = btree_bset_last(b);
521 struct btree_write *w = btree_current_write(b);
522
523 lockdep_assert_held(&b->write_lock);
524
525 BUG_ON(!b->written);
526 BUG_ON(!i->keys);
527
528 if (!btree_node_dirty(b))
529 schedule_delayed_work(&b->work, 30 * HZ);
530
531 set_btree_node_dirty(b);
532
533 if (journal_ref) {
534 if (w->journal &&
535 journal_pin_cmp(b->c, w->journal, journal_ref)) {
536 atomic_dec_bug(w->journal);
537 w->journal = NULL;
538 }
539
540 if (!w->journal) {
541 w->journal = journal_ref;
542 atomic_inc(w->journal);
543 }
544 }
545
	/* Force write if set is too big */
547 if (set_bytes(i) > PAGE_SIZE - 48 &&
548 !current->bio_list)
549 bch_btree_node_write(b, NULL);
550}
551
/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

557#define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559#define mca_can_free(c) \
560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
561
562static void mca_data_free(struct btree *b)
563{
564 BUG_ON(b->io_mutex.count != 1);
565
566 bch_btree_keys_free(&b->keys);
567
568 b->c->btree_cache_used--;
569 list_move(&b->list, &b->c->btree_cache_freed);
570}
571
572static void mca_bucket_free(struct btree *b)
573{
574 BUG_ON(btree_node_dirty(b));
575
576 b->key.ptr[0] = 0;
577 hlist_del_init_rcu(&b->hash);
578 list_move(&b->list, &b->c->btree_cache_freeable);
579}
580
581static unsigned btree_order(struct bkey *k)
582{
583 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
584}
585
586static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
587{
588 if (!bch_btree_keys_alloc(&b->keys,
589 max_t(unsigned,
590 ilog2(b->c->btree_pages),
591 btree_order(k)),
592 gfp)) {
593 b->c->btree_cache_used++;
594 list_move(&b->list, &b->c->btree_cache);
595 } else {
596 list_move(&b->list, &b->c->btree_cache_freed);
597 }
598}
599
600static struct btree *mca_bucket_alloc(struct cache_set *c,
601 struct bkey *k, gfp_t gfp)
602{
603 struct btree *b = kzalloc(sizeof(struct btree), gfp);
604 if (!b)
605 return NULL;
606
607 init_rwsem(&b->lock);
608 lockdep_set_novalidate_class(&b->lock);
609 mutex_init(&b->write_lock);
610 lockdep_set_novalidate_class(&b->write_lock);
611 INIT_LIST_HEAD(&b->list);
612 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
613 b->c = c;
614 sema_init(&b->io_mutex, 1);
615
616 mca_data_alloc(b, k, gfp);
617 return b;
618}
619
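/*
 * mca_reap() attempts to take a btree node's lock and reclaim its memory.
 * Returns -ENOMEM if the node is busy or too small (page_order < min_order);
 * if @flush is set, dirty contents are written out first, otherwise dirty or
 * in-flight nodes are skipped. On success the node is returned write locked.
 */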
620static int mca_reap(struct btree *b, unsigned min_order, bool flush)
621{
622 struct closure cl;
623
624 closure_init_stack(&cl);
625 lockdep_assert_held(&b->c->bucket_lock);
626
627 if (!down_write_trylock(&b->lock))
628 return -ENOMEM;
629
630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
631
632 if (b->keys.page_order < min_order)
633 goto out_unlock;
634
635 if (!flush) {
636 if (btree_node_dirty(b))
637 goto out_unlock;
638
639 if (down_trylock(&b->io_mutex))
640 goto out_unlock;
641 up(&b->io_mutex);
642 }
643
644 mutex_lock(&b->write_lock);
645 if (btree_node_dirty(b))
646 __bch_btree_node_write(b, &cl);
647 mutex_unlock(&b->write_lock);
648
649 closure_sync(&cl);
650
	/* wait for any in flight btree write */
652 down(&b->io_mutex);
653 up(&b->io_mutex);
654
655 return 0;
656out_unlock:
657 rw_unlock(true, b);
658 return -ENOMEM;
659}
660
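/*
 * Shrinker callback: frees in-memory btree nodes, in units of c->btree_pages
 * pages, preferring nodes on the freeable list and then unaccessed nodes from
 * the cache proper. Never shrinks below mca_reserve(c).
 */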
661static unsigned long bch_mca_scan(struct shrinker *shrink,
662 struct shrink_control *sc)
663{
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
665 struct btree *b, *t;
666 unsigned long i, nr = sc->nr_to_scan;
667 unsigned long freed = 0;
668 unsigned int btree_cache_used;
669
670 if (c->shrinker_disabled)
671 return SHRINK_STOP;
672
673 if (c->btree_cache_alloc_lock)
674 return SHRINK_STOP;
675
	/* Return -1 if we can't do anything right now */
677 if (sc->gfp_mask & __GFP_IO)
678 mutex_lock(&c->bucket_lock);
679 else if (!mutex_trylock(&c->bucket_lock))
680 return -1;
681
	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve of btree nodes so that
	 * inserts can allocate replacement nodes and make forward progress,
	 * so never free more than mca_can_free() allows:
	 */
689 nr /= c->btree_pages;
690 nr = min_t(unsigned long, nr, mca_can_free(c));
691
692 i = 0;
693 btree_cache_used = c->btree_cache_used;
694 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
695 if (nr <= 0)
696 goto out;
697
698 if (++i > 3 &&
699 !mca_reap(b, 0, false)) {
700 mca_data_free(b);
701 rw_unlock(true, b);
702 freed++;
703 }
704 nr--;
705 }
706
707 for (; (nr--) && i < btree_cache_used; i++) {
708 if (list_empty(&c->btree_cache))
709 goto out;
710
711 b = list_first_entry(&c->btree_cache, struct btree, list);
712 list_rotate_left(&c->btree_cache);
713
714 if (!b->accessed &&
715 !mca_reap(b, 0, false)) {
716 mca_bucket_free(b);
717 mca_data_free(b);
718 rw_unlock(true, b);
719 freed++;
720 } else
721 b->accessed = 0;
722 }
723out:
724 mutex_unlock(&c->bucket_lock);
725 return freed * c->btree_pages;
726}
727
728static unsigned long bch_mca_count(struct shrinker *shrink,
729 struct shrink_control *sc)
730{
731 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
732
733 if (c->shrinker_disabled)
734 return 0;
735
736 if (c->btree_cache_alloc_lock)
737 return 0;
738
739 return mca_can_free(c) * c->btree_pages;
740}
741
742void bch_btree_cache_free(struct cache_set *c)
743{
744 struct btree *b;
745 struct closure cl;
746 closure_init_stack(&cl);
747
748 if (c->shrink.list.next)
749 unregister_shrinker(&c->shrink);
750
751 mutex_lock(&c->bucket_lock);
752
753#ifdef CONFIG_BCACHE_DEBUG
754 if (c->verify_data)
755 list_move(&c->verify_data->list, &c->btree_cache);
756
757 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
758#endif
759
760 list_splice(&c->btree_cache_freeable,
761 &c->btree_cache);
762
763 while (!list_empty(&c->btree_cache)) {
764 b = list_first_entry(&c->btree_cache, struct btree, list);
765
766 if (btree_node_dirty(b))
767 btree_complete_write(b, btree_current_write(b));
768 clear_bit(BTREE_NODE_dirty, &b->flags);
769
770 mca_data_free(b);
771 }
772
773 while (!list_empty(&c->btree_cache_freed)) {
774 b = list_first_entry(&c->btree_cache_freed,
775 struct btree, list);
776 list_del(&b->list);
777 cancel_delayed_work_sync(&b->work);
778 kfree(b);
779 }
780
781 mutex_unlock(&c->bucket_lock);
782}
783
784int bch_btree_cache_alloc(struct cache_set *c)
785{
786 unsigned i;
787
788 for (i = 0; i < mca_reserve(c); i++)
789 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
790 return -ENOMEM;
791
792 list_splice_init(&c->btree_cache,
793 &c->btree_cache_freeable);
794
795#ifdef CONFIG_BCACHE_DEBUG
796 mutex_init(&c->verify_lock);
797
798 c->verify_ondisk = (void *)
799 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
800
801 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
802
803 if (c->verify_data &&
804 c->verify_data->keys.set->data)
805 list_del_init(&c->verify_data->list);
806 else
807 c->verify_data = NULL;
808#endif
809
810 c->shrink.count_objects = bch_mca_count;
811 c->shrink.scan_objects = bch_mca_scan;
812 c->shrink.seeks = 4;
813 c->shrink.batch = c->btree_pages * 2;
814
815 if (register_shrinker(&c->shrink))
816 pr_warn("bcache: %s: could not register shrinker",
817 __func__);
818
819 return 0;
820}
821
/* Btree in memory cache - hash table */

824static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
825{
826 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
827}
828
829static struct btree *mca_find(struct cache_set *c, struct bkey *k)
830{
831 struct btree *b;
832
833 rcu_read_lock();
834 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
835 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
836 goto out;
837 b = NULL;
838out:
839 rcu_read_unlock();
840 return b;
841}
842
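/*
 * Only one thread at a time may cannibalize (steal memory from) other cached
 * btree nodes; btree_cache_alloc_lock is an open coded mutex enforcing that.
 * On contention the caller is queued on btree_cache_wait and -EINTR returned.
 */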
843static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
844{
845 struct task_struct *old;
846
847 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
848 if (old && old != current) {
849 if (op)
850 prepare_to_wait(&c->btree_cache_wait, &op->wait,
851 TASK_UNINTERRUPTIBLE);
852 return -EINTR;
853 }
854
855 return 0;
856}
857
858static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
859 struct bkey *k)
860{
861 struct btree *b;
862
863 trace_bcache_btree_cache_cannibalize(c);
864
865 if (mca_cannibalize_lock(c, op))
866 return ERR_PTR(-EINTR);
867
868 list_for_each_entry_reverse(b, &c->btree_cache, list)
869 if (!mca_reap(b, btree_order(k), false))
870 return b;
871
872 list_for_each_entry_reverse(b, &c->btree_cache, list)
873 if (!mca_reap(b, btree_order(k), true))
874 return b;
875
876 WARN(1, "btree cache cannibalize failed\n");
877 return ERR_PTR(-ENOMEM);
878}
879
/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
886static void bch_cannibalize_unlock(struct cache_set *c)
887{
888 if (c->btree_cache_alloc_lock == current) {
889 c->btree_cache_alloc_lock = NULL;
890 wake_up(&c->btree_cache_wait);
891 }
892}
893
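/*
 * mca_alloc() finds memory for the btree node @k: it reuses a node from the
 * freeable or freed lists if possible, otherwise allocates a new struct btree,
 * and as a last resort cannibalizes another cached node. Returns NULL if the
 * node is already in the cache, or an ERR_PTR on failure.
 */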
894static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
895 struct bkey *k, int level)
896{
897 struct btree *b;
898
899 BUG_ON(current->bio_list);
900
901 lockdep_assert_held(&c->bucket_lock);
902
903 if (mca_find(c, k))
904 return NULL;
905
	/*
	 * Freeing a btree node doesn't free its memory - the node just goes on
	 * the btree_cache_freeable list. Check there first:
	 */
909 list_for_each_entry(b, &c->btree_cache_freeable, list)
910 if (!mca_reap(b, btree_order(k), false))
911 goto out;
912
	/*
	 * We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
916 list_for_each_entry(b, &c->btree_cache_freed, list)
917 if (!mca_reap(b, 0, false)) {
918 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
919 if (!b->keys.set[0].data)
920 goto err;
921 else
922 goto out;
923 }
924
925 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
926 if (!b)
927 goto err;
928
929 BUG_ON(!down_write_trylock(&b->lock));
930 if (!b->keys.set->data)
931 goto err;
932out:
933 BUG_ON(b->io_mutex.count != 1);
934
935 bkey_copy(&b->key, k);
936 list_move(&b->list, &c->btree_cache);
937 hlist_del_init_rcu(&b->hash);
938 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
939
940 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
941 b->parent = (void *) ~0UL;
942 b->flags = 0;
943 b->written = 0;
944 b->level = level;
945
946 if (!b->level)
947 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
948 &b->c->expensive_debug_checks);
949 else
950 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
951 &b->c->expensive_debug_checks);
952
953 return b;
954err:
955 if (b)
956 rw_unlock(true, b);
957
958 b = mca_cannibalize(c, op, k);
959 if (!IS_ERR(b))
960 goto out;
961
962 return b;
963}
964
/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
974struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
975 struct bkey *k, int level, bool write,
976 struct btree *parent)
977{
978 int i = 0;
979 struct btree *b;
980
981 BUG_ON(level < 0);
982retry:
983 b = mca_find(c, k);
984
985 if (!b) {
986 if (current->bio_list)
987 return ERR_PTR(-EAGAIN);
988
989 mutex_lock(&c->bucket_lock);
990 b = mca_alloc(c, op, k, level);
991 mutex_unlock(&c->bucket_lock);
992
993 if (!b)
994 goto retry;
995 if (IS_ERR(b))
996 return b;
997
998 bch_btree_node_read(b);
999
1000 if (!write)
1001 downgrade_write(&b->lock);
1002 } else {
1003 rw_lock(write, b, level);
1004 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1005 rw_unlock(write, b);
1006 goto retry;
1007 }
1008 BUG_ON(b->level != level);
1009 }
1010
1011 b->parent = parent;
1012 b->accessed = 1;
1013
1014 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1015 prefetch(b->keys.set[i].tree);
1016 prefetch(b->keys.set[i].data);
1017 }
1018
1019 for (; i <= b->keys.nsets; i++)
1020 prefetch(b->keys.set[i].data);
1021
1022 if (btree_node_io_error(b)) {
1023 rw_unlock(write, b);
1024 return ERR_PTR(-EIO);
1025 }
1026
1027 BUG_ON(!b->written);
1028
1029 return b;
1030}
1031
1032static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1033{
1034 struct btree *b;
1035
1036 mutex_lock(&parent->c->bucket_lock);
1037 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1038 mutex_unlock(&parent->c->bucket_lock);
1039
1040 if (!IS_ERR_OR_NULL(b)) {
1041 b->parent = parent;
1042 bch_btree_node_read(b);
1043 rw_unlock(true, b);
1044 }
1045}
1046
/* Btree alloc */

1049static void btree_node_free(struct btree *b)
1050{
1051 trace_bcache_btree_node_free(b);
1052
1053 BUG_ON(b == b->c->root);
1054
1055 mutex_lock(&b->write_lock);
1056
1057 if (btree_node_dirty(b))
1058 btree_complete_write(b, btree_current_write(b));
1059 clear_bit(BTREE_NODE_dirty, &b->flags);
1060
1061 mutex_unlock(&b->write_lock);
1062
1063 cancel_delayed_work(&b->work);
1064
1065 mutex_lock(&b->c->bucket_lock);
1066 bch_bucket_free(b->c, &b->key);
1067 mca_bucket_free(b);
1068 mutex_unlock(&b->c->bucket_lock);
1069}
1070
1071struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1072 int level, bool wait,
1073 struct btree *parent)
1074{
1075 BKEY_PADDED(key) k;
1076 struct btree *b = ERR_PTR(-EAGAIN);
1077
1078 mutex_lock(&c->bucket_lock);
1079retry:
1080 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1081 goto err;
1082
1083 bkey_put(c, &k.key);
1084 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1085
1086 b = mca_alloc(c, op, &k.key, level);
1087 if (IS_ERR(b))
1088 goto err_free;
1089
1090 if (!b) {
1091 cache_bug(c,
1092 "Tried to allocate bucket that was in btree cache");
1093 goto retry;
1094 }
1095
1096 b->accessed = 1;
1097 b->parent = parent;
1098 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1099
1100 mutex_unlock(&c->bucket_lock);
1101
1102 trace_bcache_btree_node_alloc(b);
1103 return b;
1104err_free:
1105 bch_bucket_free(c, &k.key);
1106err:
1107 mutex_unlock(&c->bucket_lock);
1108
1109 trace_bcache_btree_node_alloc_fail(c);
1110 return b;
1111}
1112
1113static struct btree *bch_btree_node_alloc(struct cache_set *c,
1114 struct btree_op *op, int level,
1115 struct btree *parent)
1116{
1117 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1118}
1119
1120static struct btree *btree_node_alloc_replacement(struct btree *b,
1121 struct btree_op *op)
1122{
1123 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1124 if (!IS_ERR_OR_NULL(n)) {
1125 mutex_lock(&n->write_lock);
1126 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1127 bkey_copy_key(&n->key, &b->key);
1128 mutex_unlock(&n->write_lock);
1129 }
1130
1131 return n;
1132}
1133
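/*
 * Build the key that, once inserted into the parent, frees the old node @b:
 * a zero key carrying @b's pointers with their bucket gens incremented, which
 * invalidates the old pointers and lets the buckets be reused.
 */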
1134static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1135{
1136 unsigned i;
1137
1138 mutex_lock(&b->c->bucket_lock);
1139
1140 atomic_inc(&b->c->prio_blocked);
1141
1142 bkey_copy(k, &b->key);
1143 bkey_copy_key(k, &ZERO_KEY);
1144
1145 for (i = 0; i < KEY_PTRS(k); i++)
1146 SET_PTR_GEN(k, i,
1147 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1148 PTR_BUCKET(b->c, &b->key, i)));
1149
1150 mutex_unlock(&b->c->bucket_lock);
1151}
1152
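/*
 * Check that every cache has enough free buckets reserved for btree nodes to
 * split all the way from this node up to the root. If not, queue the caller
 * on btree_cache_wait and return -EINTR so the operation is retried.
 */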
1153static int btree_check_reserve(struct btree *b, struct btree_op *op)
1154{
1155 struct cache_set *c = b->c;
1156 struct cache *ca;
1157 unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1158
1159 mutex_lock(&c->bucket_lock);
1160
1161 for_each_cache(ca, c, i)
1162 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1163 if (op)
1164 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1165 TASK_UNINTERRUPTIBLE);
1166 mutex_unlock(&c->bucket_lock);
1167 return -EINTR;
1168 }
1169
1170 mutex_unlock(&c->bucket_lock);
1171
1172 return mca_cannibalize_lock(b->c, op);
1173}
1174
/* Garbage collection */

1177static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1178 struct bkey *k)
1179{
1180 uint8_t stale = 0;
1181 unsigned i;
1182 struct bucket *g;
1183
	/*
	 * Nothing to mark for a zero key - it doesn't point at any buckets.
	 */
1189 if (!bkey_cmp(k, &ZERO_KEY))
1190 return stale;
1191
1192 for (i = 0; i < KEY_PTRS(k); i++) {
1193 if (!ptr_available(c, k, i))
1194 continue;
1195
1196 g = PTR_BUCKET(c, k, i);
1197
1198 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1199 g->last_gc = PTR_GEN(k, i);
1200
1201 if (ptr_stale(c, k, i)) {
1202 stale = max(stale, ptr_stale(c, k, i));
1203 continue;
1204 }
1205
1206 cache_bug_on(GC_MARK(g) &&
1207 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1208 c, "inconsistent ptrs: mark = %llu, level = %i",
1209 GC_MARK(g), level);
1210
1211 if (level)
1212 SET_GC_MARK(g, GC_MARK_METADATA);
1213 else if (KEY_DIRTY(k))
1214 SET_GC_MARK(g, GC_MARK_DIRTY);
1215 else if (!GC_MARK(g))
1216 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1217
		/* guard against overflow */
1219 SET_GC_SECTORS_USED(g, min_t(unsigned,
1220 GC_SECTORS_USED(g) + KEY_SIZE(k),
1221 MAX_GC_SECTORS_USED));
1222
1223 BUG_ON(!GC_SECTORS_USED(g));
1224 }
1225
1226 return stale;
1227}
1228
1229#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1230
1231void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1232{
1233 unsigned i;
1234
1235 for (i = 0; i < KEY_PTRS(k); i++)
1236 if (ptr_available(c, k, i) &&
1237 !ptr_stale(c, k, i)) {
1238 struct bucket *b = PTR_BUCKET(c, k, i);
1239
1240 b->gen = PTR_GEN(k, i);
1241
1242 if (level && bkey_cmp(k, &ZERO_KEY))
1243 b->prio = BTREE_PRIO;
1244 else if (!level && b->prio == BTREE_PRIO)
1245 b->prio = INITIAL_PRIO;
1246 }
1247
1248 __bch_btree_mark_key(c, level, k);
1249}
1250
1251void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1252{
1253 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1254}
1255
1256static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1257{
1258 uint8_t stale = 0;
1259 unsigned keys = 0, good_keys = 0;
1260 struct bkey *k;
1261 struct btree_iter iter;
1262 struct bset_tree *t;
1263
1264 gc->nodes++;
1265
1266 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1267 stale = max(stale, btree_mark_key(b, k));
1268 keys++;
1269
1270 if (bch_ptr_bad(&b->keys, k))
1271 continue;
1272
1273 gc->key_bytes += bkey_u64s(k);
1274 gc->nkeys++;
1275 good_keys++;
1276
1277 gc->data += KEY_SIZE(k);
1278 }
1279
1280 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1281 btree_bug_on(t->size &&
1282 bset_written(&b->keys, t) &&
1283 bkey_cmp(&b->key, &t->end) < 0,
1284 b, "found short btree key in gc");
1285
1286 if (b->c->gc_always_rewrite)
1287 return true;
1288
1289 if (stale > 10)
1290 return true;
1291
1292 if ((keys - good_keys) * 2 > keys)
1293 return true;
1294
1295 return false;
1296}
1297
1298#define GC_MERGE_NODES 4U
1299
1300struct gc_merge_info {
1301 struct btree *b;
1302 unsigned keys;
1303};
1304
1305static int bch_btree_insert_node(struct btree *, struct btree_op *,
1306 struct keylist *, atomic_t *, struct bkey *);
1307
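/*
 * Try to coalesce up to GC_MERGE_NODES adjacent nodes in r[] into one fewer
 * node when their keys fit, rewriting them and updating the parent. Returns
 * -EINTR if the parent was modified (the caller's iterator is invalidated),
 * 0 if nothing was coalesced.
 */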
1308static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1309 struct gc_stat *gc, struct gc_merge_info *r)
1310{
1311 unsigned i, nodes = 0, keys = 0, blocks;
1312 struct btree *new_nodes[GC_MERGE_NODES];
1313 struct keylist keylist;
1314 struct closure cl;
1315 struct bkey *k;
1316
1317 bch_keylist_init(&keylist);
1318
1319 if (btree_check_reserve(b, NULL))
1320 return 0;
1321
1322 memset(new_nodes, 0, sizeof(new_nodes));
1323 closure_init_stack(&cl);
1324
1325 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1326 keys += r[nodes++].keys;
1327
1328 blocks = btree_default_blocks(b->c) * 2 / 3;
1329
1330 if (nodes < 2 ||
1331 __set_blocks(b->keys.set[0].data, keys,
1332 block_bytes(b->c)) > blocks * (nodes - 1))
1333 return 0;
1334
1335 for (i = 0; i < nodes; i++) {
1336 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1337 if (IS_ERR_OR_NULL(new_nodes[i]))
1338 goto out_nocoalesce;
1339 }
1340
	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as a shortcut, to avoid allocating replacement nodes we'd
	 * only have to throw away:
	 */
1347 if (btree_check_reserve(b, NULL))
1348 goto out_nocoalesce;
1349
1350 for (i = 0; i < nodes; i++)
1351 mutex_lock(&new_nodes[i]->write_lock);
1352
1353 for (i = nodes - 1; i > 0; --i) {
1354 struct bset *n1 = btree_bset_first(new_nodes[i]);
1355 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1356 struct bkey *k, *last = NULL;
1357
1358 keys = 0;
1359
1360 if (i > 1) {
1361 for (k = n2->start;
1362 k < bset_bkey_last(n2);
1363 k = bkey_next(k)) {
1364 if (__set_blocks(n1, n1->keys + keys +
1365 bkey_u64s(k),
1366 block_bytes(b->c)) > blocks)
1367 break;
1368
1369 last = k;
1370 keys += bkey_u64s(k);
1371 }
1372 } else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
1381 if (__set_blocks(n1, n1->keys + n2->keys,
1382 block_bytes(b->c)) >
1383 btree_blocks(new_nodes[i]))
1384 goto out_nocoalesce;
1385
1386 keys = n2->keys;
1387
1388 last = &r->b->key;
1389 }
1390
1391 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1392 btree_blocks(new_nodes[i]));
1393
1394 if (last)
1395 bkey_copy_key(&new_nodes[i]->key, last);
1396
1397 memcpy(bset_bkey_last(n1),
1398 n2->start,
1399 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1400
1401 n1->keys += keys;
1402 r[i].keys = n1->keys;
1403
1404 memmove(n2->start,
1405 bset_bkey_idx(n2, keys),
1406 (void *) bset_bkey_last(n2) -
1407 (void *) bset_bkey_idx(n2, keys));
1408
1409 n2->keys -= keys;
1410
1411 if (__bch_keylist_realloc(&keylist,
1412 bkey_u64s(&new_nodes[i]->key)))
1413 goto out_nocoalesce;
1414
1415 bch_btree_node_write(new_nodes[i], &cl);
1416 bch_keylist_add(&keylist, &new_nodes[i]->key);
1417 }
1418
1419 for (i = 0; i < nodes; i++)
1420 mutex_unlock(&new_nodes[i]->write_lock);
1421
1422 closure_sync(&cl);
1423
	/* We emptied out this node */
1425 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1426 btree_node_free(new_nodes[0]);
1427 rw_unlock(true, new_nodes[0]);
1428 new_nodes[0] = NULL;
1429
1430 for (i = 0; i < nodes; i++) {
1431 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1432 goto out_nocoalesce;
1433
1434 make_btree_freeing_key(r[i].b, keylist.top);
1435 bch_keylist_push(&keylist);
1436 }
1437
1438 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1439 BUG_ON(!bch_keylist_empty(&keylist));
1440
1441 for (i = 0; i < nodes; i++) {
1442 btree_node_free(r[i].b);
1443 rw_unlock(true, r[i].b);
1444
1445 r[i].b = new_nodes[i];
1446 }
1447
1448 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1449 r[nodes - 1].b = ERR_PTR(-EINTR);
1450
1451 trace_bcache_btree_gc_coalesce(nodes);
1452 gc->nodes--;
1453
1454 bch_keylist_free(&keylist);
1455
	/* Invalidated our iterator */
1457 return -EINTR;
1458
1459out_nocoalesce:
1460 closure_sync(&cl);
	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);

	/* Only free the keylist once we're done popping keys off of it */
	bch_keylist_free(&keylist);
1466
1467 for (i = 0; i < nodes; i++)
1468 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1469 btree_node_free(new_nodes[i]);
1470 rw_unlock(true, new_nodes[i]);
1471 }
1472 return 0;
1473}
1474
1475static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1476 struct btree *replace)
1477{
1478 struct keylist keys;
1479 struct btree *n;
1480
1481 if (btree_check_reserve(b, NULL))
1482 return 0;
1483
	n = btree_node_alloc_replacement(replace, NULL);
	if (IS_ERR_OR_NULL(n))
		return 0;

	/* recheck reserve after allocating replacement node */
1487 if (btree_check_reserve(b, NULL)) {
1488 btree_node_free(n);
1489 rw_unlock(true, n);
1490 return 0;
1491 }
1492
1493 bch_btree_node_write_sync(n);
1494
1495 bch_keylist_init(&keys);
1496 bch_keylist_add(&keys, &n->key);
1497
1498 make_btree_freeing_key(replace, keys.top);
1499 bch_keylist_push(&keys);
1500
1501 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1502 BUG_ON(!bch_keylist_empty(&keys));
1503
1504 btree_node_free(replace);
1505 rw_unlock(true, n);
1506
	/* Invalidated our iterator */
1508 return -EINTR;
1509}
1510
1511static unsigned btree_gc_count_keys(struct btree *b)
1512{
1513 struct bkey *k;
1514 struct btree_iter iter;
1515 unsigned ret = 0;
1516
1517 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1518 ret += bkey_u64s(k);
1519
1520 return ret;
1521}
1522
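/*
 * Walk an interior node's children during gc: mark keys, opportunistically
 * coalesce or rewrite children, recurse into them, and write out any dirty
 * nodes, keeping a sliding window of GC_MERGE_NODES locked nodes.
 */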
1523static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1524 struct closure *writes, struct gc_stat *gc)
1525{
1526 int ret = 0;
1527 bool should_rewrite;
1528 struct bkey *k;
1529 struct btree_iter iter;
1530 struct gc_merge_info r[GC_MERGE_NODES];
1531 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1532
1533 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1534
1535 for (i = r; i < r + ARRAY_SIZE(r); i++)
1536 i->b = ERR_PTR(-EINTR);
1537
1538 while (1) {
1539 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1540 if (k) {
1541 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1542 true, b);
1543 if (IS_ERR(r->b)) {
1544 ret = PTR_ERR(r->b);
1545 break;
1546 }
1547
1548 r->keys = btree_gc_count_keys(r->b);
1549
1550 ret = btree_gc_coalesce(b, op, gc, r);
1551 if (ret)
1552 break;
1553 }
1554
1555 if (!last->b)
1556 break;
1557
1558 if (!IS_ERR(last->b)) {
1559 should_rewrite = btree_gc_mark_node(last->b, gc);
1560 if (should_rewrite) {
1561 ret = btree_gc_rewrite_node(b, op, last->b);
1562 if (ret)
1563 break;
1564 }
1565
1566 if (last->b->level) {
1567 ret = btree_gc_recurse(last->b, op, writes, gc);
1568 if (ret)
1569 break;
1570 }
1571
1572 bkey_copy_key(&b->c->gc_done, &last->b->key);
1573
			/*
			 * Must flush dirty leaf nodes before gc ends -
			 * write them out here, before unlocking:
			 */
1578 mutex_lock(&last->b->write_lock);
1579 if (btree_node_dirty(last->b))
1580 bch_btree_node_write(last->b, writes);
1581 mutex_unlock(&last->b->write_lock);
1582 rw_unlock(true, last->b);
1583 }
1584
1585 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1586 r->b = NULL;
1587
1588 if (need_resched()) {
1589 ret = -EAGAIN;
1590 break;
1591 }
1592 }
1593
1594 for (i = r; i < r + ARRAY_SIZE(r); i++)
1595 if (!IS_ERR_OR_NULL(i->b)) {
1596 mutex_lock(&i->b->write_lock);
1597 if (btree_node_dirty(i->b))
1598 bch_btree_node_write(i->b, writes);
1599 mutex_unlock(&i->b->write_lock);
1600 rw_unlock(true, i->b);
1601 }
1602
1603 return ret;
1604}
1605
1606static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1607 struct closure *writes, struct gc_stat *gc)
1608{
1609 struct btree *n = NULL;
1610 int ret = 0;
1611 bool should_rewrite;
1612
1613 should_rewrite = btree_gc_mark_node(b, gc);
1614 if (should_rewrite) {
1615 n = btree_node_alloc_replacement(b, NULL);
1616
1617 if (!IS_ERR_OR_NULL(n)) {
1618 bch_btree_node_write_sync(n);
1619
1620 bch_btree_set_root(n);
1621 btree_node_free(b);
1622 rw_unlock(true, n);
1623
1624 return -EINTR;
1625 }
1626 }
1627
1628 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1629
1630 if (b->level) {
1631 ret = btree_gc_recurse(b, op, writes, gc);
1632 if (ret)
1633 return ret;
1634 }
1635
1636 bkey_copy_key(&b->c->gc_done, &b->key);
1637
1638 return ret;
1639}
1640
1641static void btree_gc_start(struct cache_set *c)
1642{
1643 struct cache *ca;
1644 struct bucket *b;
1645 unsigned i;
1646
1647 if (!c->gc_mark_valid)
1648 return;
1649
1650 mutex_lock(&c->bucket_lock);
1651
1652 c->gc_mark_valid = 0;
1653 c->gc_done = ZERO_KEY;
1654
1655 for_each_cache(ca, c, i)
1656 for_each_bucket(b, ca) {
1657 b->last_gc = b->gen;
1658 if (!atomic_read(&b->pin)) {
1659 SET_GC_MARK(b, 0);
1660 SET_GC_SECTORS_USED(b, 0);
1661 }
1662 }
1663
1664 mutex_unlock(&c->bucket_lock);
1665}
1666
1667static void bch_btree_gc_finish(struct cache_set *c)
1668{
1669 struct bucket *b;
1670 struct cache *ca;
1671 unsigned i;
1672
1673 mutex_lock(&c->bucket_lock);
1674
1675 set_gc_sectors(c);
1676 c->gc_mark_valid = 1;
1677 c->need_gc = 0;
1678
1679 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1680 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1681 GC_MARK_METADATA);
1682
	/* don't reclaim buckets to which writeback keys point */
1684 rcu_read_lock();
1685 for (i = 0; i < c->devices_max_used; i++) {
1686 struct bcache_device *d = c->devices[i];
1687 struct cached_dev *dc;
1688 struct keybuf_key *w, *n;
1689 unsigned j;
1690
1691 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1692 continue;
1693 dc = container_of(d, struct cached_dev, disk);
1694
1695 spin_lock(&dc->writeback_keys.lock);
1696 rbtree_postorder_for_each_entry_safe(w, n,
1697 &dc->writeback_keys.keys, node)
1698 for (j = 0; j < KEY_PTRS(&w->key); j++)
1699 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1700 GC_MARK_DIRTY);
1701 spin_unlock(&dc->writeback_keys.lock);
1702 }
1703 rcu_read_unlock();
1704
1705 c->avail_nbuckets = 0;
1706 for_each_cache(ca, c, i) {
1707 uint64_t *i;
1708
1709 ca->invalidate_needs_gc = 0;
1710
1711 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1712 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1713
1714 for (i = ca->prio_buckets;
1715 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1716 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1717
1718 for_each_bucket(b, ca) {
1719 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1720
1721 if (atomic_read(&b->pin))
1722 continue;
1723
1724 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1725
1726 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1727 c->avail_nbuckets++;
1728 }
1729 }
1730
1731 mutex_unlock(&c->bucket_lock);
1732}
1733
1734static void bch_btree_gc(struct cache_set *c)
1735{
1736 int ret;
1737 struct gc_stat stats;
1738 struct closure writes;
1739 struct btree_op op;
1740 uint64_t start_time = local_clock();
1741
1742 trace_bcache_gc_start(c);
1743
1744 memset(&stats, 0, sizeof(struct gc_stat));
1745 closure_init_stack(&writes);
1746 bch_btree_op_init(&op, SHRT_MAX);
1747
1748 btree_gc_start(c);
1749
	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1751 do {
1752 ret = btree_root(gc_root, c, &op, &writes, &stats);
1753 closure_sync(&writes);
1754 cond_resched();
1755
1756 if (ret && ret != -EAGAIN)
1757 pr_warn("gc failed!");
1758 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1759
1760 bch_btree_gc_finish(c);
1761 wake_up_allocators(c);
1762
1763 bch_time_stats_update(&c->btree_gc_time, start_time);
1764
1765 stats.key_bytes *= sizeof(uint64_t);
1766 stats.data <<= 9;
1767 bch_update_bucket_in_use(c, &stats);
1768 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1769
1770 trace_bcache_gc_end(c);
1771
1772 bch_moving_gc(c);
1773}
1774
1775static bool gc_should_run(struct cache_set *c)
1776{
1777 struct cache *ca;
1778 unsigned i;
1779
1780 for_each_cache(ca, c, i)
1781 if (ca->invalidate_needs_gc)
1782 return true;
1783
1784 if (atomic_read(&c->sectors_to_gc) < 0)
1785 return true;
1786
1787 return false;
1788}
1789
1790static int bch_gc_thread(void *arg)
1791{
1792 struct cache_set *c = arg;
1793
1794 while (1) {
1795 wait_event_interruptible(c->gc_wait,
1796 kthread_should_stop() ||
1797 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1798 gc_should_run(c));
1799
1800 if (kthread_should_stop() ||
1801 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1802 break;
1803
1804 set_gc_sectors(c);
1805 bch_btree_gc(c);
1806 }
1807
1808 wait_for_kthread_stop();
1809 return 0;
1810}
1811
1812int bch_gc_thread_start(struct cache_set *c)
1813{
1814 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1815 return PTR_ERR_OR_ZERO(c->gc_thread);
1816}
1817
/* Initial partial gc */

1820static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1821{
1822 int ret = 0;
1823 struct bkey *k, *p = NULL;
1824 struct btree_iter iter;
1825
1826 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1827 bch_initial_mark_key(b->c, b->level, k);
1828
1829 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1830
1831 if (b->level) {
1832 bch_btree_iter_init(&b->keys, &iter, NULL);
1833
1834 do {
1835 k = bch_btree_iter_next_filter(&iter, &b->keys,
1836 bch_ptr_bad);
1837 if (k)
1838 btree_node_prefetch(b, k);
1839
1840 if (p)
1841 ret = btree(check_recurse, p, b, op);
1842
1843 p = k;
1844 } while (p && !ret);
1845 }
1846
1847 return ret;
1848}
1849
1850int bch_btree_check(struct cache_set *c)
1851{
1852 struct btree_op op;
1853
1854 bch_btree_op_init(&op, SHRT_MAX);
1855
1856 return btree_root(check_recurse, c, &op);
1857}
1858
1859void bch_initial_gc_finish(struct cache_set *c)
1860{
1861 struct cache *ca;
1862 struct bucket *b;
1863 unsigned i;
1864
1865 bch_btree_gc_finish(c);
1866
1867 mutex_lock(&c->bucket_lock);
1868
	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
1878 for_each_cache(ca, c, i) {
1879 for_each_bucket(b, ca) {
1880 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1881 fifo_full(&ca->free[RESERVE_BTREE]))
1882 break;
1883
1884 if (bch_can_invalidate_bucket(ca, b) &&
1885 !GC_MARK(b)) {
1886 __bch_invalidate_one_bucket(ca, b);
1887 if (!fifo_push(&ca->free[RESERVE_PRIO],
1888 b - ca->buckets))
1889 fifo_push(&ca->free[RESERVE_BTREE],
1890 b - ca->buckets);
1891 }
1892 }
1893 }
1894
1895 mutex_unlock(&c->bucket_lock);
1896}
1897
/* Btree insertion */

1900static bool btree_insert_key(struct btree *b, struct bkey *k,
1901 struct bkey *replace_key)
1902{
1903 unsigned status;
1904
1905 BUG_ON(bkey_cmp(k, &b->key) > 0);
1906
1907 status = bch_btree_insert_key(&b->keys, k, replace_key);
1908 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1909 bch_check_keys(&b->keys, "%u for %s", status,
1910 replace_key ? "replace" : "insert");
1911
1912 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1913 status);
1914 return true;
1915 } else
1916 return false;
1917}
1918
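/*
 * Space left for inserts in the node's last bset, in u64s, leaving room for
 * the extra key an extent split could require.
 */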
1919static size_t insert_u64s_remaining(struct btree *b)
1920{
1921 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1922
	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
1926 if (b->keys.ops->is_extents)
1927 ret -= KEY_MAX_U64S;
1928
1929 return max(ret, 0L);
1930}
1931
1932static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1933 struct keylist *insert_keys,
1934 struct bkey *replace_key)
1935{
1936 bool ret = false;
1937 int oldsize = bch_count_data(&b->keys);
1938
1939 while (!bch_keylist_empty(insert_keys)) {
1940 struct bkey *k = insert_keys->keys;
1941
1942 if (bkey_u64s(k) > insert_u64s_remaining(b))
1943 break;
1944
1945 if (bkey_cmp(k, &b->key) <= 0) {
1946 if (!b->level)
1947 bkey_put(b->c, k);
1948
1949 ret |= btree_insert_key(b, k, replace_key);
1950 bch_keylist_pop_front(insert_keys);
1951 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1952 BKEY_PADDED(key) temp;
1953 bkey_copy(&temp.key, insert_keys->keys);
1954
1955 bch_cut_back(&b->key, &temp.key);
1956 bch_cut_front(&b->key, insert_keys->keys);
1957
1958 ret |= btree_insert_key(b, &temp.key, replace_key);
1959 break;
1960 } else {
1961 break;
1962 }
1963 }
1964
1965 if (!ret)
1966 op->insert_collision = true;
1967
1968 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1969
1970 BUG_ON(bch_count_data(&b->keys) < oldsize);
1971 return ret;
1972}
1973
1974static int btree_split(struct btree *b, struct btree_op *op,
1975 struct keylist *insert_keys,
1976 struct bkey *replace_key)
1977{
1978 bool split;
1979 struct btree *n1, *n2 = NULL, *n3 = NULL;
1980 uint64_t start_time = local_clock();
1981 struct closure cl;
1982 struct keylist parent_keys;
1983
1984 closure_init_stack(&cl);
1985 bch_keylist_init(&parent_keys);
1986
1987 if (btree_check_reserve(b, op)) {
1988 if (!b->level)
1989 return -EINTR;
1990 else
1991 WARN(1, "insufficient reserve for split\n");
1992 }
1993
1994 n1 = btree_node_alloc_replacement(b, op);
1995 if (IS_ERR(n1))
1996 goto err;
1997
1998 split = set_blocks(btree_bset_first(n1),
1999 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2000
2001 if (split) {
2002 unsigned keys = 0;
2003
2004 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2005
2006 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2007 if (IS_ERR(n2))
2008 goto err_free1;
2009
2010 if (!b->parent) {
2011 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2012 if (IS_ERR(n3))
2013 goto err_free2;
2014 }
2015
2016 mutex_lock(&n1->write_lock);
2017 mutex_lock(&n2->write_lock);
2018
2019 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2020
		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

2026 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2027 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2028 keys));
2029
2030 bkey_copy_key(&n1->key,
2031 bset_bkey_idx(btree_bset_first(n1), keys));
2032 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2033
2034 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2035 btree_bset_first(n1)->keys = keys;
2036
2037 memcpy(btree_bset_first(n2)->start,
2038 bset_bkey_last(btree_bset_first(n1)),
2039 btree_bset_first(n2)->keys * sizeof(uint64_t));
2040
2041 bkey_copy_key(&n2->key, &b->key);
2042
2043 bch_keylist_add(&parent_keys, &n2->key);
2044 bch_btree_node_write(n2, &cl);
2045 mutex_unlock(&n2->write_lock);
2046 rw_unlock(true, n2);
2047 } else {
2048 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2049
2050 mutex_lock(&n1->write_lock);
2051 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2052 }
2053
2054 bch_keylist_add(&parent_keys, &n1->key);
2055 bch_btree_node_write(n1, &cl);
2056 mutex_unlock(&n1->write_lock);
2057
2058 if (n3) {
		/* Depth increases, make a new root */
2060 mutex_lock(&n3->write_lock);
2061 bkey_copy_key(&n3->key, &MAX_KEY);
2062 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2063 bch_btree_node_write(n3, &cl);
2064 mutex_unlock(&n3->write_lock);
2065
2066 closure_sync(&cl);
2067 bch_btree_set_root(n3);
2068 rw_unlock(true, n3);
2069 } else if (!b->parent) {
		/* Root filled up but didn't need to be split */
2071 closure_sync(&cl);
2072 bch_btree_set_root(n1);
2073 } else {
		/* Split a non root node */
2075 closure_sync(&cl);
2076 make_btree_freeing_key(b, parent_keys.top);
2077 bch_keylist_push(&parent_keys);
2078
2079 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2080 BUG_ON(!bch_keylist_empty(&parent_keys));
2081 }
2082
2083 btree_node_free(b);
2084 rw_unlock(true, n1);
2085
2086 bch_time_stats_update(&b->c->btree_split_time, start_time);
2087
2088 return 0;
2089err_free2:
2090 bkey_put(b->c, &n2->key);
2091 btree_node_free(n2);
2092 rw_unlock(true, n2);
2093err_free1:
2094 bkey_put(b->c, &n1->key);
2095 btree_node_free(n1);
2096 rw_unlock(true, n1);
2097err:
2098 WARN(1, "bcache: btree split failed (level %u)", b->level);
2099
2100 if (n3 == ERR_PTR(-EAGAIN) ||
2101 n2 == ERR_PTR(-EAGAIN) ||
2102 n1 == ERR_PTR(-EAGAIN))
2103 return -EAGAIN;
2104
2105 return -ENOMEM;
2106}
2107
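/*
 * Insert as many keys from @insert_keys as fit into node @b: leaf nodes are
 * marked dirty (pinned by @journal_ref), interior nodes are written
 * immediately. If the keys don't fit, the node is split, or -EAGAIN/-EINTR is
 * returned so the caller can retry holding enough locks for a split.
 */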
2108static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2109 struct keylist *insert_keys,
2110 atomic_t *journal_ref,
2111 struct bkey *replace_key)
2112{
2113 struct closure cl;
2114
2115 BUG_ON(b->level && replace_key);
2116
2117 closure_init_stack(&cl);
2118
2119 mutex_lock(&b->write_lock);
2120
2121 if (write_block(b) != btree_bset_last(b) &&
2122 b->keys.last_set_unwritten)
2123 bch_btree_init_next(b);
2124
2125 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2126 mutex_unlock(&b->write_lock);
2127 goto split;
2128 }
2129
2130 BUG_ON(write_block(b) != btree_bset_last(b));
2131
2132 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2133 if (!b->level)
2134 bch_btree_leaf_dirty(b, journal_ref);
2135 else
2136 bch_btree_node_write(b, &cl);
2137 }
2138
2139 mutex_unlock(&b->write_lock);
2140
	/* wait for btree node write if necessary, after unlock */
2142 closure_sync(&cl);
2143
2144 return 0;
2145split:
2146 if (current->bio_list) {
2147 op->lock = b->c->root->level + 1;
2148 return -EAGAIN;
2149 } else if (op->lock <= b->c->root->level) {
2150 op->lock = b->c->root->level + 1;
2151 return -EINTR;
2152 } else {
		/* Invalidated all iterators */
2154 int ret = btree_split(b, op, insert_keys, replace_key);
2155
2156 if (bch_keylist_empty(insert_keys))
2157 return 0;
2158 else if (!ret)
2159 return -EINTR;
2160 return ret;
2161 }
2162}
2163
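/*
 * Insert a "check key" - a random key on PTR_CHECK_DEV - used by the cache
 * miss path so a later insert can detect whether the btree was modified in
 * the meantime; upgrades from a read to a write lock on @b if necessary.
 */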
2164int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2165 struct bkey *check_key)
2166{
2167 int ret = -EINTR;
2168 uint64_t btree_ptr = b->key.ptr[0];
2169 unsigned long seq = b->seq;
2170 struct keylist insert;
2171 bool upgrade = op->lock == -1;
2172
2173 bch_keylist_init(&insert);
2174
2175 if (upgrade) {
2176 rw_unlock(false, b);
2177 rw_lock(true, b, b->level);
2178
2179 if (b->key.ptr[0] != btree_ptr ||
2180 b->seq != seq + 1) {
2181 op->lock = b->level;
2182 goto out;
2183 }
2184 }
2185
2186 SET_KEY_PTRS(check_key, 1);
2187 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2188
2189 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2190
2191 bch_keylist_add(&insert, check_key);
2192
2193 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2194
2195 BUG_ON(!ret && !bch_keylist_empty(&insert));
2196out:
2197 if (upgrade)
2198 downgrade_write(&b->lock);
2199 return ret;
2200}
2201
2202struct btree_insert_op {
2203 struct btree_op op;
2204 struct keylist *keys;
2205 atomic_t *journal_ref;
2206 struct bkey *replace_key;
2207};
2208
2209static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2210{
2211 struct btree_insert_op *op = container_of(b_op,
2212 struct btree_insert_op, op);
2213
2214 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2215 op->journal_ref, op->replace_key);
2216 if (ret && !bch_keylist_empty(op->keys))
2217 return ret;
2218 else
2219 return MAP_DONE;
2220}
2221
2222int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2223 atomic_t *journal_ref, struct bkey *replace_key)
2224{
2225 struct btree_insert_op op;
2226 int ret = 0;
2227
2228 BUG_ON(current->bio_list);
2229 BUG_ON(bch_keylist_empty(keys));
2230
2231 bch_btree_op_init(&op.op, 0);
2232 op.keys = keys;
2233 op.journal_ref = journal_ref;
2234 op.replace_key = replace_key;
2235
2236 while (!ret && !bch_keylist_empty(keys)) {
2237 op.op.lock = 0;
2238 ret = bch_btree_map_leaf_nodes(&op.op, c,
2239 &START_KEY(keys->keys),
2240 btree_insert_fn);
2241 }
2242
2243 if (ret) {
2244 struct bkey *k;
2245
2246 pr_err("error %i", ret);
2247
2248 while ((k = bch_keylist_pop(keys)))
2249 bkey_put(c, k);
2250 } else if (op.op.insert_collision)
2251 ret = -ESRCH;
2252
2253 return ret;
2254}
2255
2256void bch_btree_set_root(struct btree *b)
2257{
2258 unsigned i;
2259 struct closure cl;
2260
2261 closure_init_stack(&cl);
2262
2263 trace_bcache_btree_set_root(b);
2264
2265 BUG_ON(!b->written);
2266
2267 for (i = 0; i < KEY_PTRS(&b->key); i++)
2268 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2269
2270 mutex_lock(&b->c->bucket_lock);
2271 list_del_init(&b->list);
2272 mutex_unlock(&b->c->bucket_lock);
2273
2274 b->c->root = b;
2275
2276 bch_journal_meta(b->c, &cl);
2277 closure_sync(&cl);
2278}
2279
/* Map across nodes or keys */

2282static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2283 struct bkey *from,
2284 btree_map_nodes_fn *fn, int flags)
2285{
2286 int ret = MAP_CONTINUE;
2287
2288 if (b->level) {
2289 struct bkey *k;
2290 struct btree_iter iter;
2291
2292 bch_btree_iter_init(&b->keys, &iter, from);
2293
2294 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2295 bch_ptr_bad))) {
2296 ret = btree(map_nodes_recurse, k, b,
2297 op, from, fn, flags);
2298 from = NULL;
2299
2300 if (ret != MAP_CONTINUE)
2301 return ret;
2302 }
2303 }
2304
2305 if (!b->level || flags == MAP_ALL_NODES)
2306 ret = fn(op, b);
2307
2308 return ret;
2309}
2310
2311int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2312 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2313{
2314 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2315}
2316
2317static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2318 struct bkey *from, btree_map_keys_fn *fn,
2319 int flags)
2320{
2321 int ret = MAP_CONTINUE;
2322 struct bkey *k;
2323 struct btree_iter iter;
2324
2325 bch_btree_iter_init(&b->keys, &iter, from);
2326
2327 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2328 ret = !b->level
2329 ? fn(op, b, k)
2330 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2331 from = NULL;
2332
2333 if (ret != MAP_CONTINUE)
2334 return ret;
2335 }
2336
2337 if (!b->level && (flags & MAP_END_KEY))
2338 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2339 KEY_OFFSET(&b->key), 0));
2340
2341 return ret;
2342}
2343
2344int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2345 struct bkey *from, btree_map_keys_fn *fn, int flags)
2346{
2347 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2348}
2349
/* Keybuf code */

2352static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2353{
	/* Overlapping keys compare equal */
2355 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2356 return -1;
2357 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2358 return 1;
2359 return 0;
2360}
2361
2362static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2363 struct keybuf_key *r)
2364{
2365 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2366}
2367
2368struct refill {
2369 struct btree_op op;
2370 unsigned nr_found;
2371 struct keybuf *buf;
2372 struct bkey *end;
2373 keybuf_pred_fn *pred;
2374};
2375
2376static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2377 struct bkey *k)
2378{
2379 struct refill *refill = container_of(op, struct refill, op);
2380 struct keybuf *buf = refill->buf;
2381 int ret = MAP_CONTINUE;
2382
2383 if (bkey_cmp(k, refill->end) >= 0) {
2384 ret = MAP_DONE;
2385 goto out;
2386 }
2387
2388 if (!KEY_SIZE(k))
2389 goto out;
2390
2391 if (refill->pred(buf, k)) {
2392 struct keybuf_key *w;
2393
2394 spin_lock(&buf->lock);
2395
2396 w = array_alloc(&buf->freelist);
2397 if (!w) {
2398 spin_unlock(&buf->lock);
2399 return MAP_DONE;
2400 }
2401
2402 w->private = NULL;
2403 bkey_copy(&w->key, k);
2404
2405 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2406 array_free(&buf->freelist, w);
2407 else
2408 refill->nr_found++;
2409
2410 if (array_freelist_empty(&buf->freelist))
2411 ret = MAP_DONE;
2412
2413 spin_unlock(&buf->lock);
2414 }
2415out:
2416 buf->last_scanned = *k;
2417 return ret;
2418}
2419
2420void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2421 struct bkey *end, keybuf_pred_fn *pred)
2422{
2423 struct bkey start = buf->last_scanned;
2424 struct refill refill;
2425
2426 cond_resched();
2427
2428 bch_btree_op_init(&refill.op, -1);
2429 refill.nr_found = 0;
2430 refill.buf = buf;
2431 refill.end = end;
2432 refill.pred = pred;
2433
2434 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2435 refill_keybuf_fn, MAP_END_KEY);
2436
2437 trace_bcache_keyscan(refill.nr_found,
2438 KEY_INODE(&start), KEY_OFFSET(&start),
2439 KEY_INODE(&buf->last_scanned),
2440 KEY_OFFSET(&buf->last_scanned));
2441
2442 spin_lock(&buf->lock);
2443
2444 if (!RB_EMPTY_ROOT(&buf->keys)) {
2445 struct keybuf_key *w;
2446 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2447 buf->start = START_KEY(&w->key);
2448
2449 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2450 buf->end = w->key;
2451 } else {
2452 buf->start = MAX_KEY;
2453 buf->end = MAX_KEY;
2454 }
2455
2456 spin_unlock(&buf->lock);
2457}
2458
2459static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2460{
2461 rb_erase(&w->node, &buf->keys);
2462 array_free(&buf->freelist, w);
2463}
2464
2465void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2466{
2467 spin_lock(&buf->lock);
2468 __bch_keybuf_del(buf, w);
2469 spin_unlock(&buf->lock);
2470}
2471
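/*
 * Returns true if any key in the keybuf overlapping [start, end) is currently
 * in use (w->private set); overlapping keys that aren't in use are dropped
 * from the buffer.
 */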
2472bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2473 struct bkey *end)
2474{
2475 bool ret = false;
2476 struct keybuf_key *p, *w, s;
2477 s.key = *start;
2478
2479 if (bkey_cmp(end, &buf->start) <= 0 ||
2480 bkey_cmp(start, &buf->end) >= 0)
2481 return false;
2482
2483 spin_lock(&buf->lock);
2484 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2485
2486 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2487 p = w;
2488 w = RB_NEXT(w, node);
2489
2490 if (p->private)
2491 ret = true;
2492 else
2493 __bch_keybuf_del(buf, p);
2494 }
2495
2496 spin_unlock(&buf->lock);
2497 return ret;
2498}
2499
2500struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2501{
2502 struct keybuf_key *w;
2503 spin_lock(&buf->lock);
2504
2505 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2506
2507 while (w && w->private)
2508 w = RB_NEXT(w, node);
2509
2510 if (w)
2511 w->private = ERR_PTR(-EINTR);
2512
2513 spin_unlock(&buf->lock);
2514 return w;
2515}
2516
2517struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2518 struct keybuf *buf,
2519 struct bkey *end,
2520 keybuf_pred_fn *pred)
2521{
2522 struct keybuf_key *ret;
2523
2524 while (1) {
2525 ret = bch_keybuf_next(buf);
2526 if (ret)
2527 break;
2528
2529 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2530 pr_debug("scan finished");
2531 break;
2532 }
2533
2534 bch_refill_keybuf(c, buf, end, pred);
2535 }
2536
2537 return ret;
2538}
2539
2540void bch_keybuf_init(struct keybuf *buf)
2541{
2542 buf->last_scanned = MAX_KEY;
2543 buf->keys = RB_ROOT;
2544
2545 spin_lock_init(&buf->lock);
2546 array_allocator_init(&buf->freelist);
2547}
2548