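/*
 * Primary bucket allocation code.
 *
 * Allocation is done in terms of buckets: the allocator thread finds buckets
 * that can be invalidated (holding no live data, or only reclaimable data,
 * and not pinned), bumps their generation number so stale pointers into them
 * can be detected, optionally issues discards, and then pushes them onto the
 * per-reserve freelists that bch_bucket_alloc() pops from.
 */
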
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

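/*
 * Background allocation thread: scans for buckets that can be invalidated,
 * invalidates them, rewrites prios/gens (marking the buckets as invalidated
 * on disk when the cache is in sync mode), optionally issues discards for the
 * newly freed buckets, and then puts them on the various freelists.
 */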
static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

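/*
 * Determines the order in which buckets are reused: smallest bucket_prio()
 * first, weighted by how many live sectors the bucket still holds. The prios
 * are shifted so that the bucket with the smallest prio is still worth 1/8th
 * of INITIAL_PRIO, so multiplying by GC_SECTORS_USED() stays meaningful for
 * the coldest buckets.
 */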
#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
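			/*
			 * The heap is empty: we've run out of buckets we can
			 * invalidate, so ask garbage collection to free some
			 * up and bail out until it has.
			 */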
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

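	/* Prios/gens are actually the most important reserve */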
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
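		/*
		 * First, pull buckets off of the free_inc list, possibly
		 * issue discards to them, then hand them over to the
		 * freelists:
		 */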
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

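		/*
		 * We've run out of free buckets, so we need to find some we
		 * can invalidate. First invalidate them in memory and add
		 * them to the free_inc list:
		 */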
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

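		/*
		 * Now write their new gens out to disk so we can start
		 * handing the buckets out for new writes:
		 */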
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
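			/*
			 * Don't bother writing prios/gens out until free_inc
			 * is full, so each bch_prio_write() covers as many
			 * newly invalidated buckets as possible; in sync mode
			 * the new gens have to be on disk before the buckets
			 * can be handed out on the next pass.
			 */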
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}

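/*
 * Allocation: pop a bucket off the given reserve's freelist, sleeping until
 * one becomes available if @wait is set. Returns the bucket index, or -1 on
 * failure.
 */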
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

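	/* fastpath */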
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

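	/*
	 * Allocate one bucket from each of the first n caches in
	 * c->cache_by_alloc and record a pointer to it in k:
	 */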
	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

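/*
 * Sector allocator: open buckets are partially filled buckets that data
 * writes are packed into sequentially.
 */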
struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

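/*
 * We keep multiple open buckets for writes and try to segregate different
 * write streams for better cache utilization: first we look for a bucket
 * where the last write to it was sequential with the current write (the
 * bkey_cmp() against @search), and failing that we look for a bucket that
 * was last used by the same write point. The idea is that if multiple tasks
 * are pulling data into the cache at the same time, keeping their data in
 * separate buckets preserves locality and lets buckets be reclaimed more
 * cleanly later.
 */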
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

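/*
 * Allocates some space in the cache to write to, and updates @k to point to
 * the newly allocated space: KEY_PTRS(k), KEY_OFFSET(k) and KEY_SIZE(k) are
 * set to describe what was allocated.
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * were actually allocated. Returns false if a new bucket was needed and the
 * allocation failed, which can only happen when @wait is false.
 */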
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

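	/*
	 * We might have to allocate a new bucket, which we can't do while
	 * holding the data_bucket_lock spinlock; so if we do have to
	 * allocate, we drop the lock, allocate into alloc.key and retry.
	 * KEY_PTRS(&alloc.key) indicates whether we're holding an allocated
	 * but not yet used bucket.
	 */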
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

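	/*
	 * If we had to allocate, we might have raced and not needed the new
	 * bucket by the time we retook the lock; if we allocated a bucket
	 * but didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */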
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

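	/* Set up the pointers to the space we're allocating: */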
	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

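	/*
	 * Move b to the end of the lru, and keep track of what this bucket
	 * was last used for:
	 */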
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

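	/*
	 * k takes refcounts (pins) on the buckets it points to until it's
	 * inserted into the btree; if this open bucket is still partially
	 * free we take extra pins here so it keeps its own references,
	 * otherwise the open bucket's references are effectively handed
	 * over to k.
	 */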
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

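/* Init */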
void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}