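// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an 8 bit gen; this gen corresponds to the gen in btree
 * pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that matter, but
 * the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache;
 * bch_bucket_alloc_set() wraps it for allocating via a cache set.
 *
 * invalidate_buckets_(lru|fifo|random)() find buckets that are available to
 * be reused, invalidate them (incrementing their gen) and stick them on the
 * free_inc list, in lru, fifo or random order.
 */
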
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

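/* Bucket heap / gen */
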
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

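/*
 * Each time roughly 1/1024th of the cache's capacity has been written,
 * age every unpinned data bucket by decrementing its prio, so that buckets
 * written to recently sort as younger in the LRU than buckets that have
 * sat idle.
 */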
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}
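/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on
 * disk), then optionally issues discard commands to the newly free buckets,
 * then puts them on the various freelists.
 */
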
static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}
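/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also weight a bucket by the number of sectors of live data still
 * in it, and for that multiply to make sense the priorities are offset so
 * that the bucket with the smallest prio is still worth 1/8th of what
 * INITIAL_PRIO is worth.
 */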
#define bucket_prio(b)							\
({									\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
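			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */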
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

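/*
 * Sleep until @cond is true, dropping bucket_lock while we wait and retaking
 * it before rechecking; bails out to the allocator thread's "out" label if
 * the thread should stop or cache set IO has been disabled.
 */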
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

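	/* Prios/gens are actually the most important reserve */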
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
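		/*
		 * First, we pull buckets off of the free_inc list, possibly
		 * issue discards to them, then we add the bucket to a
		 * freelist:
		 */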
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

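		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */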
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

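		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */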
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
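			/*
			 * On a synchronous cache set, the new gens have to be
			 * written to disk before the buckets may be reused;
			 * only do that write once free_inc is completely
			 * full, so a single prio write covers a whole batch
			 * of invalidated buckets. If the write fails, let
			 * garbage collection try to reclaim space instead.
			 */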
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

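/* Allocation */
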
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

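	/* No allocation if CACHE_SET_IO_DISABLE bit is set */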
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

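	/* fastpath */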
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

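	/* No allocation if CACHE_SET_IO_DISABLE bit is set */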
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b == -1)
		goto err;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

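/* Sector allocator */
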
struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};
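/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * Dirty sectors of a flash only volume are not reclaimable; if they were
 * mixed into buckets that also hold data from a cached device, those buckets
 * could never be reclaimed, even after the cached device's dirty data had
 * been written back.
 *
 * Likewise, say you're starting Firefox at the same time as you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
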
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}
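/*
 * Allocates some space in the cache to write to, and updates k to point to
 * the newly allocated space: KEY_SIZE(k) and KEY_OFFSET(k) are set so that
 * the key points to the end of the newly allocated space.
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, this will only fail if the cache set is shutting down.
 */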
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;
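	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock,
	 * allocate and then retry. KEY_PTRS() indicates whether alloc points
	 * to allocated bucket(s).
	 */
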
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

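	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we called pick_data_bucket(). If we allocated a bucket
	 * but didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */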
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

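	/* Set up the pointer to the space we're allocating: */
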
	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

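	/*
	 * Move b to the end of the lru, and keep track of what this bucket
	 * was last used for:
	 */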
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

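	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount the allocation took:
	 */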
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

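/* Init */
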
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}