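/*
 * Primary bucket allocation code.
 *
 * Allocation in bcache is done in terms of buckets: each cache device keeps a
 * set of freelists (ca->free[]) that foreground code allocates from, and a
 * per-device allocator thread refills them by picking buckets to invalidate
 * (according to the cache's replacement policy), bumping their generation
 * numbers, optionally discarding them and, for synchronous cache sets,
 * persisting the new prios/gens before the buckets are handed out again.
 */
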
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS	128

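/* Bucket generations and priority rescaling */
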
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

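/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */
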
static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

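/*
 * Reuse order for buckets: smallest bucket_prio() first. The priority is
 * scaled by GC_SECTORS_USED(), so buckets holding little live data are
 * preferred over buckets whose reuse would evict a lot of still-useful
 * cached data.
 */
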
#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
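			/*
			 * The heap ran dry before free_inc filled up: we need
			 * garbage collection to find more reusable buckets, so
			 * flag it and bail out rather than spinning here.
			 */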
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

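/*
 * Sleep until @cond is true, dropping bucket_lock while we wait. Jumps to the
 * out: label (allocator shutdown) if the kthread is asked to stop or the
 * cache set is marked CACHE_SET_IO_DISABLE.
 */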
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

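	/* Fill the prio/gen reserve first, then whichever freelist has room */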
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
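		/*
		 * First, pull any buckets sitting on free_inc, optionally
		 * issue a discard for each one, then push them onto the
		 * freelists (waiting for room if they're all full):
		 */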
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

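		/*
		 * We've run out of buckets to hand out; find buckets we can
		 * invalidate, first marking them invalidated in memory and
		 * adding them to free_inc:
		 */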
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

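		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */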
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));

		if (CACHE_SYNC(&ca->set->sb)) {
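			/*
			 * If free_inc didn't fill all the way up, keep
			 * invalidating before paying for a prio write;
			 * bch_prio_write() persists the new prios/gens so the
			 * buckets invalidated above can safely be reused.
			 */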
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

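/* Allocation */
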
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

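	/* Fastpath: pop a bucket the allocator thread has already freed */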
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

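	/*
	 * Debug mode only: verify the bucket we just handed out isn't also
	 * referenced by prio_buckets[] or sitting on any of the freelists.
	 */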
	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

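	/* Allocate a bucket from each cache device, in cache_by_alloc[] order */
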
	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
				     bucket_to_sector(c, b),
				     ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

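/* Sector allocator */
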
struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

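/*
 * We keep multiple open buckets for writes and try to segregate different
 * write streams for better cache utilization: writes to flash-only volumes
 * are never mixed into a bucket used by cached devices (their dirty data
 * isn't reclaimable by writeback and would keep the whole bucket pinned).
 * Within that, we prefer the bucket whose last write was sequential with the
 * current one (the open bucket's key matches the search key), and failing
 * that a bucket last used by the same write point, so data from different
 * tasks tends to land in different buckets and get invalidated together.
 */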
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

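/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * were actually allocated.
 *
 * Returns false only if it had to allocate a new bucket and @wait was false.
 */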
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

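	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we do have to allocate, we drop the lock,
	 * allocate and then retry; KEY_PTRS(&alloc.key) tells us whether we
	 * already allocated on a previous iteration:
	 */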
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

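	/*
	 * If we had to allocate we might have raced and not needed the bucket
	 * the second time pick_data_bucket() ran; if we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */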
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

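	/* Set up the pointer(s) to the space we're allocating: */
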
	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

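	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */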
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

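	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree; if this open bucket still has space left, grab an
	 * extra pin for k, otherwise transfer our existing refcount to it.
	 */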
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

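/* Init */
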
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}