/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an 8 bit gen; for a btree pointer into a bucket to be
 * considered valid its gen must match the bucket's gen, so a bucket can be
 * invalidated (and later reused) simply by incrementing its gen.
 *
 * Freshly invalidated buckets go onto the free_inc list; their new gens are
 * written out by bch_prio_write() before they move to the in-memory free
 * lists (ca->free[]), from which bch_bucket_alloc() hands them out. The
 * allocator thread below drives that whole process.
 */
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
        uint8_t ret = ++b->gen;

        ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
        WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

        return ret;
}

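/*
 * bch_rescale_priorities() ages bucket priorities: @sectors is subtracted from
 * c->rescale, and once roughly 1/1024th of the cache's capacity has been
 * written since the last rescale, every unpinned non-btree bucket with a
 * nonzero prio is decremented, so b->prio behaves like an LRU clock for the
 * replacement policies below.
 */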
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
        struct cache *ca;
        struct bucket *b;
        unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
        unsigned i;
        int r;

        atomic_sub(sectors, &c->rescale);

        do {
                r = atomic_read(&c->rescale);

                if (r >= 0)
                        return;
        } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

        mutex_lock(&c->bucket_lock);

        c->min_prio = USHRT_MAX;

        for_each_cache(ca, c, i)
                for_each_bucket(b, ca)
                        if (b->prio &&
                            b->prio != BTREE_PRIO &&
                            !atomic_read(&b->pin)) {
                                b->prio--;
                                c->min_prio = min(c->min_prio, b->prio);
                        }

        mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
        return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

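/*
 * A bucket may be invalidated if GC marked it as unused or reclaimable, nobody
 * holds a pin on it, and bumping its gen would not risk wrapping past the
 * oldest gen GC has seen (BUCKET_GC_GEN_MAX).
 */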
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
        BUG_ON(!ca->set->gc_mark_valid);

        return (!GC_MARK(b) ||
                GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
                !atomic_read(&b->pin) &&
                can_inc_bucket_gen(b);
}

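/*
 * Invalidate a bucket in memory: bump its gen (so any existing pointers into
 * it become stale), reset its prio, and take a pin so it isn't invalidated
 * again before the allocator hands it out.
 */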
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        lockdep_assert_held(&ca->set->bucket_lock);
        BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

        if (GC_SECTORS_USED(b))
                trace_bcache_invalidate(ca, b - ca->buckets);

        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        __bch_invalidate_one_bucket(ca, b);

        fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: bucket_prio() combines how recently a bucket was used (b->prio, which
 * bch_rescale_priorities() decays over time) with how many live sectors it
 * still holds (GC_SECTORS_USED()), so the LRU policy below prefers to
 * invalidate cold buckets containing little live data.
 */

#define bucket_prio(b)                                                  \
({                                                                      \
        unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;    \
                                                                        \
        (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})

#define bucket_max_cmp(l, r)    (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)    (bucket_prio(l) > bucket_prio(r))

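/*
 * LRU policy: collect the invalidatable buckets with the smallest
 * bucket_prio() in the heap, then pop them off in ascending prio order until
 * free_inc is full. If the heap runs dry before free_inc fills up, ask for a
 * garbage collection run and try again later.
 */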
static void invalidate_buckets_lru(struct cache *ca)
{
        struct bucket *b;
        ssize_t i;

        ca->heap.used = 0;

        for_each_bucket(b, ca) {
                if (!bch_can_invalidate_bucket(ca, b))
                        continue;

                if (!heap_full(&ca->heap))
                        heap_add(&ca->heap, b, bucket_max_cmp);
                else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
                        ca->heap.data[0] = b;
                        heap_sift(&ca->heap, 0, bucket_max_cmp);
                }
        }

        for (i = ca->heap.used / 2 - 1; i >= 0; --i)
                heap_sift(&ca->heap, i, bucket_min_cmp);

        while (!fifo_full(&ca->free_inc)) {
                if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
                        /*
                         * We don't want to be calling invalidate_buckets()
                         * multiple times when it can't do anything
                         */
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }

                bch_invalidate_one_bucket(ca, b);
        }
}

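/*
 * FIFO policy: walk the buckets in order, starting where the last scan left
 * off, invalidating whatever is reclaimable until free_inc is full or every
 * bucket has been checked once.
 */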
static void invalidate_buckets_fifo(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
                    ca->fifo_last_bucket >= ca->sb.nbuckets)
                        ca->fifo_last_bucket = ca->sb.first_bucket;

                b = ca->buckets + ca->fifo_last_bucket++;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

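/*
 * Random policy: pick buckets at random and invalidate the reclaimable ones,
 * giving up and waking GC if free_inc still isn't full after checking half of
 * the buckets.
 */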
static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;
                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets / 2) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

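/*
 * Refill free_inc using whichever replacement policy this cache was formatted
 * with (CACHE_REPLACEMENT() in the superblock).
 */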
static void invalidate_buckets(struct cache *ca)
{
        BUG_ON(ca->invalidate_needs_gc);

        switch (CACHE_REPLACEMENT(&ca->sb)) {
        case CACHE_REPLACEMENT_LRU:
                invalidate_buckets_lru(ca);
                break;
        case CACHE_REPLACEMENT_FIFO:
                invalidate_buckets_fifo(ca);
                break;
        case CACHE_REPLACEMENT_RANDOM:
                invalidate_buckets_random(ca);
                break;
        }
}

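/*
 * Sleep until @cond becomes true, dropping bucket_lock while we wait and
 * retaking it before rechecking; bails out of the allocator thread (returns 0)
 * if the kthread has been asked to stop.
 */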
#define allocator_wait(ca, cond)                                        \
do {                                                                    \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                                                                        \
                mutex_unlock(&(ca)->set->bucket_lock);                  \
                if (kthread_should_stop()) {                            \
                        set_current_state(TASK_RUNNING);                \
                        return 0;                                       \
                }                                                       \
                                                                        \
                schedule();                                             \
                mutex_lock(&(ca)->set->bucket_lock);                    \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)

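/*
 * Push a freshly invalidated bucket onto one of the in-memory freelists,
 * trying the prio/gen reserve first; returns false if every reserve is full.
 */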
static int bch_allocator_push(struct cache *ca, long bucket)
{
        unsigned i;

        /* Prios/gens are actually the most important reserve */
        if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
                return true;

        for (i = 0; i < RESERVE_NR; i++)
                if (fifo_push(&ca->free[i], bucket))
                        return true;

        return false;
}

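/*
 * The allocator thread's main loop: move invalidated buckets from free_inc to
 * the free lists (discarding them first if discards are enabled), then
 * invalidate more buckets to refill free_inc, writing out the new prios/gens
 * when the cache is in synchronous mode.
 */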
static int bch_allocator_thread(void *arg)
{
        struct cache *ca = arg;

        mutex_lock(&ca->set->bucket_lock);

        while (1) {
                /*
                 * First, pull buckets off of the free_inc list, possibly
                 * issue discards for them, then push them onto the free
                 * lists:
                 */
                while (!fifo_empty(&ca->free_inc)) {
                        long bucket;

                        fifo_pop(&ca->free_inc, bucket);

                        if (ca->discard) {
                                mutex_unlock(&ca->set->bucket_lock);
                                blkdev_issue_discard(ca->bdev,
                                        bucket_to_sector(ca->set, bucket),
                                        ca->sb.bucket_size, GFP_KERNEL, 0);
                                mutex_lock(&ca->set->bucket_lock);
                        }

                        allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->btree_cache_wait);
                        wake_up(&ca->set->bucket_wait);
                }

                /*
                 * We've run out of free buckets, we need to find some buckets
                 * we can invalidate. First, invalidate them in memory and add
                 * them to the free_inc list:
                 */
retry_invalidate:
                allocator_wait(ca, ca->set->gc_mark_valid &&
                               !ca->invalidate_needs_gc);
                invalidate_buckets(ca);

                /*
                 * Now, we write their new gens to disk so we can start writing
                 * new stuff to them:
                 */
                allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
                if (CACHE_SYNC(&ca->set->sb)) {
                        /*
                         * In synchronous mode the new gens have to be written
                         * out before the buckets can be reused, so batch the
                         * work: only pay for a prio write once free_inc has
                         * been completely refilled; otherwise go back and
                         * invalidate some more buckets first.
                         */
                        if (!fifo_full(&ca->free_inc))
                                goto retry_invalidate;

                        bch_prio_write(ca);
                }
        }
}

/* Allocation */

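/*
 * Allocate a single bucket from cache @ca out of the given reserve, waiting
 * for the allocator thread to refill the freelists if @wait is set (otherwise
 * -1 is returned when nothing is available). Returns the bucket index and
 * marks the bucket as metadata or reclaimable data according to @reserve.
 */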
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;

        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
                goto out;

        if (!wait) {
                trace_bcache_alloc_fail(ca, reserve);
                return -1;
        }

        do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);

                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
        } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
                 !fifo_pop(&ca->free[reserve], r));

        finish_wait(&ca->set->bucket_wait, &w);
out:
        if (ca->alloc_thread)
                wake_up_process(ca->alloc_thread);

        trace_bcache_alloc(ca, reserve);

        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
                unsigned j;

                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

                for (j = 0; j < RESERVE_NR; j++)
                        fifo_for_each(i, &ca->free[j], iter)
                                BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
        }

        b = ca->buckets + r;

        BUG_ON(atomic_read(&b->pin) != 1);

        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

        if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
        } else {
                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                SET_GC_MOVE(b, 0);
                b->prio = INITIAL_PRIO;
        }

        if (ca->set->avail_nbuckets > 0) {
                ca->set->avail_nbuckets--;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }

        return r;
}

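/* Mark a bucket as unused again and update the available-bucket accounting. */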
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
        SET_GC_MARK(b, 0);
        SET_GC_SECTORS_USED(b, 0);

        if (ca->set->avail_nbuckets < ca->set->nbuckets) {
                ca->set->avail_nbuckets++;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }
}

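/* Free every bucket that @k points to. */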
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                __bch_bucket_free(PTR_CACHE(c, k, i),
                                  PTR_BUCKET(c, k, i));
}

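/*
 * Allocate @n buckets, each from a different cache, and build a key pointing
 * at all of them; caller must hold bucket_lock. On failure any buckets already
 * allocated are freed again and -1 is returned.
 */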
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
{
        int i;

        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);

        bkey_init(k);

        /* Allocate a bucket from each of the first @n caches, in
         * cache_by_alloc order: */

        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
                long b = bch_bucket_alloc(ca, reserve, wait);

                if (b == -1)
                        goto err;

                k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                     bucket_to_sector(c, b),
                                     ca->sb.nr_this_dev);

                SET_KEY_PTRS(k, i + 1);
        }

        return 0;
err:
        bch_bucket_free(c, k);
        bkey_put(c, k);
        return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
{
        int ret;

        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
}

/* Sector allocator */

struct open_bucket {
        struct list_head        list;
        unsigned                last_write_point;
        unsigned                sectors_free;
        BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same write point (task).
 * Writes to flash only volumes are also kept apart from writes caching backing
 * devices (the UUID_FLASH_ONLY() check below).
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * Say you're starting Firefox at the same time you're copying a large file.
 * Firefox will likely end up being fairly hot and stay in the cache awhile,
 * but the data you copied might not be; if you wrote all that data to the same
 * buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a reasonable heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                            const struct bkey *search,
                                            unsigned write_point,
                                            struct bkey *alloc)
{
        struct open_bucket *ret, *ret_task = NULL;

        list_for_each_entry_reverse(ret, &c->data_buckets, list)
                if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
                    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
                        continue;
                else if (!bkey_cmp(&ret->key, search))
                        goto found;
                else if (ret->last_write_point == write_point)
                        ret_task = ret;

        ret = ret_task ?: list_first_entry(&c->data_buckets,
                                           struct open_bucket, list);
found:
        if (!ret->sectors_free && KEY_PTRS(alloc)) {
                ret->sectors_free = c->sb.bucket_size;
                bkey_copy(&ret->key, alloc);
                bkey_init(alloc);
        }

        if (!ret->sectors_free)
                ret = NULL;

        return ret;
}

/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space, updating KEY_SIZE(k) and KEY_OFFSET(k) (to point to
 * the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, waits for a bucket rather than failing.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
                       unsigned write_point, unsigned write_prio, bool wait)
{
        struct open_bucket *b;
        BKEY_PADDED(key) alloc;
        unsigned i;

        /*
         * We might have to allocate a new bucket, which we can't do with a
         * spinlock held. So if we have to allocate, we drop the lock, allocate
         * and then retry. KEY_PTRS() indicates whether alloc points to
         * allocated bucket(s).
         */
        bkey_init(&alloc.key);
        spin_lock(&c->data_bucket_lock);

        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
                        ? RESERVE_MOVINGGC
                        : RESERVE_NONE;

                spin_unlock(&c->data_bucket_lock);

                if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
                        return false;

                spin_lock(&c->data_bucket_lock);
        }

        /*
         * If we had to allocate, we might race and not need to allocate the
         * second time we called pick_data_bucket(). If we allocated a bucket
         * but didn't use it, drop the refcount bch_bucket_alloc() took:
         */
        if (KEY_PTRS(&alloc.key))
                bkey_put(c, &alloc.key);

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                EBUG_ON(ptr_stale(c, &b->key, i));

        /* Set up the pointer to the space we're allocating: */

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                k->ptr[i] = b->key.ptr[i];

        sectors = min(sectors, b->sectors_free);

        SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
        SET_KEY_SIZE(k, sectors);
        SET_KEY_PTRS(k, KEY_PTRS(&b->key));

        /*
         * Move b to the end of the lru, and keep track of what this bucket was
         * last used for:
         */
        list_move_tail(&b->list, &c->data_buckets);
        bkey_copy_key(&b->key, k);
        b->last_write_point = write_point;

        b->sectors_free -= sectors;

        for (i = 0; i < KEY_PTRS(&b->key); i++) {
                SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

                atomic_long_add(sectors,
                                &PTR_CACHE(c, &b->key, i)->sectors_written);
        }

        if (b->sectors_free < c->sb.block_size)
                b->sectors_free = 0;

        /*
         * k takes refcounts on the buckets it points to until it's inserted
         * into the btree; if this open bucket still has space left, take an
         * extra pin per pointer so the bucket stays pinned for the next write,
         * otherwise our reference is transferred to k:
         */
        if (b->sectors_free)
                for (i = 0; i < KEY_PTRS(&b->key); i++)
                        atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

        spin_unlock(&c->data_bucket_lock);
        return true;
}

/* Init */

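/* Tear down the open bucket list, freeing the open_bucket structures. */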
void bch_open_buckets_free(struct cache_set *c)
{
        struct open_bucket *b;

        while (!list_empty(&c->data_buckets)) {
                b = list_first_entry(&c->data_buckets,
                                     struct open_bucket, list);
                list_del(&b->list);
                kfree(b);
        }
}

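/*
 * Allocate the fixed pool of MAX_OPEN_BUCKETS open_bucket structures used by
 * the sector allocator above.
 */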
int bch_open_buckets_alloc(struct cache_set *c)
{
        int i;

        spin_lock_init(&c->data_bucket_lock);

        for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

                if (!b)
                        return -ENOMEM;

                list_add(&b->list, &c->data_buckets);
        }

        return 0;
}

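/* Start the per-cache allocator thread. */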
int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}