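/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 */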
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90
#define CUTOFF_WRITEBACK	50
#define CUTOFF_WRITEBACK_SYNC	75

struct kmem_cache *bch_search_cache;

static void check_should_skip(struct cached_dev *, struct search *);

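/* Cgroup interface */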
#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}
static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
69
70static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
71 const char *buf)
72{
73 int v = bch_read_string_list(buf, bch_cache_modes);
74 if (v < 0)
75 return v;
76
77 cgroup_to_bcache(cgrp)->cache_mode = v - 1;
78 return 0;
79}
80
81static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
82{
83 return cgroup_to_bcache(cgrp)->verify;
84}
85
86static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
87{
88 cgroup_to_bcache(cgrp)->verify = val;
89 return 0;
90}
91
92static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
93{
94 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
95 return atomic_read(&bcachecg->stats.cache_hits);
96}
97
98static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
99{
100 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
101 return atomic_read(&bcachecg->stats.cache_misses);
102}
103
104static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
105 struct cftype *cft)
106{
107 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
108 return atomic_read(&bcachecg->stats.cache_bypass_hits);
109}
110
111static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
112 struct cftype *cft)
113{
114 struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
115 return atomic_read(&bcachecg->stats.cache_bypass_misses);
116}
117
118static struct cftype bch_files[] = {
119 {
120 .name = "cache_mode",
121 .read = cache_mode_read,
122 .write_string = cache_mode_write,
123 },
124 {
125 .name = "verify",
126 .read_u64 = bch_verify_read,
127 .write_u64 = bch_verify_write,
128 },
129 {
130 .name = "cache_hits",
131 .read_u64 = bch_cache_hits_read,
132 },
133 {
134 .name = "cache_misses",
135 .read_u64 = bch_cache_misses_read,
136 },
137 {
138 .name = "cache_bypass_hits",
139 .read_u64 = bch_cache_bypass_hits_read,
140 },
141 {
142 .name = "cache_bypass_misses",
143 .read_u64 = bch_cache_bypass_misses_read,
144 },
145 { }
146};
147
148static void init_bch_cgroup(struct bch_cgroup *cg)
149{
150 cg->cache_mode = -1;
151}
152
153static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
154{
155 struct bch_cgroup *cg;
156
157 cg = kzalloc(sizeof(*cg), GFP_KERNEL);
158 if (!cg)
159 return ERR_PTR(-ENOMEM);
160 init_bch_cgroup(cg);
161 return &cg->css;
162}
163
164static void bcachecg_destroy(struct cgroup *cgroup)
165{
166 struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
167 free_css_id(&bcache_subsys, &cg->css);
168 kfree(cg);
169}
170
171struct cgroup_subsys bcache_subsys = {
172 .create = bcachecg_create,
173 .destroy = bcachecg_destroy,
174 .subsys_id = bcache_subsys_id,
175 .name = "bcache",
176 .module = THIS_MODULE,
177};
178EXPORT_SYMBOL_GPL(bcache_subsys);
179#endif
180
181static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
182{
183#ifdef CONFIG_CGROUP_BCACHE
184 int r = bch_bio_to_cgroup(bio)->cache_mode;
185 if (r >= 0)
186 return r;
187#endif
188 return BDEV_CACHE_MODE(&dc->sb);
189}
190
191static bool verify(struct cached_dev *dc, struct bio *bio)
192{
193#ifdef CONFIG_CGROUP_BCACHE
194 if (bch_bio_to_cgroup(bio)->verify)
195 return true;
196#endif
197 return dc->verify;
198}
199
200static void bio_csum(struct bio *bio, struct bkey *k)
201{
202 struct bio_vec *bv;
203 uint64_t csum = 0;
204 int i;
205
206 bio_for_each_segment(bv, bio, i) {
207 void *d = kmap(bv->bv_page) + bv->bv_offset;
208 csum = bch_crc64_update(csum, d, bv->bv_len);
209 kunmap(bv->bv_page);
210 }
211
212 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
213}
214
215
216
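/* Insert data into cache */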
static void bio_invalidate(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct bio *bio = op->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&op->keys, 0, op->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&op->keys,
				&KEY(op->inode, bio->bi_sector, len));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_journal, bcache_wq);
}

struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

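/*
 * We keep multiple open buckets (buckets we're currently writing to) so that
 * different write streams aren't interleaved in the same bucket.
 * pick_data_bucket() first looks for an open bucket whose key matches the
 * search key (i.e. this write is sequential with the last write to that
 * bucket), then for one last used by the same task, and otherwise falls back
 * to the least recently used open bucket. If the chosen bucket is out of
 * space it is refilled from @alloc (a bucket the caller already allocated);
 * if no refill is available, NULL is returned so the caller can allocate one
 * and retry. Must be called with c->data_bucket_lock held.
 */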
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

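/*
 * Allocates some space in the cache to write to, and sets up the pointers in
 * @k to point at it; KEY_SIZE(k) and KEY_OFFSET(k) are updated to describe
 * the space that was allocated. May allocate fewer than @sectors sectors -
 * KEY_SIZE(k) says how many were actually allocated. Returns false if nothing
 * could be allocated; for writeback writes (s->writeback) a closure is passed
 * to the allocator so the allocation waits instead of failing.
 */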
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->op.c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	struct closure cl, *w = NULL;
	unsigned i;

	if (s->writeback) {
		closure_init_stack(&cl);
		w = &cl;
	}

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held - so if we do have to allocate, we drop the lock,
	 * allocate and then retry. KEY_PTRS(&alloc.key) tells us whether we
	 * are still holding a bucket we allocated but didn't end up using.
	 */
	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->op.write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, w))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we raced and pick_data_bucket() didn't end up needing the bucket
	 * we allocated, drop the refcount we took on it:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointers to the space we're allocating: */
	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * If this open bucket still has space left, pin the underlying
	 * buckets so the allocator won't invalidate them while the open
	 * bucket is still handing out sectors:
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

static void bch_insert_data_error(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

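	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the
	 * pointers from the keys we'll accomplish just that.
	 */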
	struct bkey *src = op->keys.bottom, *dst = op->keys.bottom;

	while (src != op->keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		bkey_copy(dst, src);

		dst = bkey_next(dst);
		src = n;
	}

	op->keys.top = dst;

	bch_journal(cl);
}

static void bch_insert_data_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (error) {
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_insert_data_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

static void bch_insert_data_loop(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);
	struct bio *bio = op->cache_bio, *n;

	if (op->skip)
		return bio_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		bch_queue_gc(op->c);
	}

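	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write. So strip REQ_FLUSH|REQ_FUA
	 * off the data writes themselves:
	 */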
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_journal, bcache_wq);

		k = op->keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
		if (!n) {
			__bkey_put(op->c, k);
			continue_at(cl, bch_insert_data_loop, bcache_wq);
		}

		n->bi_end_io	= bch_insert_data_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		pr_debug("%s", pkey(k));
		bch_keylist_push(&op->keys);

		trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_journal, bcache_wq);
err:
	/* bch_alloc_sectors() should never fail for writeback writes: */
	BUG_ON(s->writeback);

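	/*
	 * If it's not a writeback write we'd rather just bail out if there
	 * aren't any buckets ready to write to - waiting for buckets to be
	 * freed up could take a while and might starve other work.
	 */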
	if (s->write) {
		/*
		 * Writethrough write: we can't complete the write until
		 * we've updated the index, but we don't want to wait around
		 * for buckets to be freed up, so just invalidate the rest of
		 * the write.
		 */
		op->skip = true;
		return bio_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->keys))
			continue_at(cl, bch_journal, bcache_wq);
		else
			closure_return(cl);
	}
}

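/**
 * bch_insert_data - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, a writeback write, or a write to a flash only
 * volume.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->skip is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->cache_bio and op->inode.
 */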
void bch_insert_data(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	bch_keylist_init(&op->keys);
	bio_get(op->cache_bio);
	bch_insert_data_loop(cl);
}

void bch_btree_insert_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);
	struct search *s = container_of(op, struct search, op);

	if (bch_btree_insert(op, op->c)) {
		s->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (op->insert_data_done) {
		bch_keylist_free(&op->keys);
		closure_return(cl);
	} else
		continue_at(cl, bch_insert_data_loop, bcache_wq);
}

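/* Common code for the make_request functions */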
static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

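	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */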
	if (error)
		s->error = error;
	else if (ptr_stale(s->op.c, &b->key, 0)) {
		atomic_long_inc(&s->op.c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->op.cache_bio)
		bio_put(s->op.cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct bio_vec *bv;
	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, op.keys));

	__closure_init(&s->cl, NULL);

	s->op.inode		= d->id;
	s->op.c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->op.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->op.skip		= (bio->bi_rw & REQ_DISCARD) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}

static void btree_read_async(struct closure *cl)
{
	struct btree_op *op = container_of(cl, struct btree_op, cl);

	int ret = btree_root(search_recurse, op->c, op);

	if (ret == -EAGAIN)
		continue_at(cl, btree_read_async, bcache_wq);

	closure_return(cl);
}

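/* Cached devices */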
static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

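/* Process reads */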
static void cached_dev_read_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->op.cache_bio) {
		int i;
		struct bio_vec *bv;

		__bio_for_each_segment(bv, s->op.cache_bio, i, 0)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void request_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/*
		 * The cache read failed, but we can retry from the backing
		 * device:
		 */
		pr_debug("recovering at sector %llu",
			 (uint64_t) s->orig_bio->bi_sector);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		trace_bcache_read_retry(&s->bio.bio);
		closure_bio_submit(&s->bio.bio, &s->cl, s->d);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}

static void request_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

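	/*
	 * s->op.cache_bio != NULL implies that we had a cache miss; cache_bio
	 * now contains the data read from the backing device for the miss
	 * plus any readahead. First copy the part the caller asked for into
	 * the original bio's pages (s->cache_miss), then cache_bio is
	 * inserted into the cache below.
	 */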
	if (s->op.cache_bio) {
		struct bio_vec *src, *dst;
		unsigned src_offset, dst_offset, bytes;
		void *dst_ptr;

		bio_reset(s->op.cache_bio);
		s->op.cache_bio->bi_sector	= s->cache_miss->bi_sector;
		s->op.cache_bio->bi_bdev	= s->cache_miss->bi_bdev;
		s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;
		bch_bio_map(s->op.cache_bio, NULL);

		src = bio_iovec(s->op.cache_bio);
		dst = bio_iovec(s->cache_miss);
		src_offset = src->bv_offset;
		dst_offset = dst->bv_offset;
		dst_ptr = kmap(dst->bv_page);

		while (1) {
			if (dst_offset == dst->bv_offset + dst->bv_len) {
				kunmap(dst->bv_page);
				dst++;
				if (dst == bio_iovec_idx(s->cache_miss,
						s->cache_miss->bi_vcnt))
					break;

				dst_offset = dst->bv_offset;
				dst_ptr = kmap(dst->bv_page);
			}

			if (src_offset == src->bv_offset + src->bv_len) {
				src++;
				if (src == bio_iovec_idx(s->op.cache_bio,
						 s->op.cache_bio->bi_vcnt))
					BUG();

				src_offset = src->bv_offset;
			}

			bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
				    src->bv_offset + src->bv_len - src_offset);

			memcpy(dst_ptr + dst_offset,
			       page_address(src->bv_page) + src_offset,
			       bytes);

			src_offset	+= bytes;
			dst_offset	+= bytes;
		}

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->op.cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->op.c->flags)) {
		s->op.type = BTREE_REPLACE;
		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	}

	continue_at(cl, cached_dev_read_complete, NULL);
}

static void request_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);

	if (s->error)
		continue_at_nobarrier(cl, request_read_error, bcache_wq);
	else if (s->op.cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, request_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_read_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = 0;
	unsigned reada;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
	if (!miss)
		return -EAGAIN;

	if (miss == bio)
		s->op.lookup_done = true;

	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;

	if (s->cache_miss || s->op.skip)
		goto out_submit;

	if (miss != bio ||
	    (bio->bi_rw & REQ_RAHEAD) ||
	    (bio->bi_rw & REQ_META) ||
	    s->op.c->gc_stats.in_use >= CUTOFF_CACHE_READA)
		reada = 0;
	else {
		reada = min(dc->readahead >> 9,
			    sectors - bio_sectors(miss));

		if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
			reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
	}

	s->cache_bio_sectors = bio_sectors(miss) + reada;
	s->op.cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);

	if (!s->op.cache_bio)
		goto out_submit;

	s->op.cache_bio->bi_sector	= miss->bi_sector;
	s->op.cache_bio->bi_bdev	= miss->bi_bdev;
	s->op.cache_bio->bi_size	= s->cache_bio_sectors << 9;

	s->op.cache_bio->bi_end_io	= request_endio;
	s->op.cache_bio->bi_private	= &s->cl;

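	/*
	 * Insert a placeholder "check key" for the range we're about to read,
	 * so that a racing write is detected when the real key is later
	 * inserted with BTREE_REPLACE; the lookup also has to restart
	 * (-EINTR) since the btree iterator is no longer valid after the
	 * insert.
	 */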
	ret = -EINTR;
	if (!bch_btree_insert_check_key(b, &s->op, s->op.cache_bio))
		goto out_put;

	bch_bio_map(s->op.cache_bio, NULL);
	if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss = miss;
	bio_get(s->op.cache_bio);

	trace_bcache_cache_miss(s->orig_bio);
	closure_bio_submit(s->op.cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(s->op.cache_bio);
	s->op.cache_bio = NULL;
out_submit:
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void request_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	check_should_skip(dc, s);
	closure_call(&s->op.cl, btree_read_async, NULL, cl);

	continue_at(cl, request_read_done_bh, NULL);
}

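/* Process writes */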
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static bool should_writeback(struct cached_dev *dc, struct bio *bio)
{
	unsigned threshold = (bio->bi_rw & REQ_SYNC)
		? CUTOFF_WRITEBACK_SYNC
		: CUTOFF_WRITEBACK;

	return !atomic_read(&dc->disk.detaching) &&
		cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
		dc->disk.c->gc_stats.in_use < threshold;
}

static void request_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start, end;
	start = KEY(dc->disk.id, bio->bi_sector, 0);
	end = KEY(dc->disk.id, bio_end(bio), 0);

	bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

	check_should_skip(dc, s);
	down_read_non_owner(&dc->writeback_lock);

	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		s->op.skip	= false;
		s->writeback	= true;
	}

	if (bio->bi_rw & REQ_DISCARD)
		goto skip;

	if (s->op.skip)
		goto skip;

	if (should_writeback(dc, s->orig_bio))
		s->writeback = true;

	if (!s->writeback) {
		s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						   dc->disk.bio_split);

		trace_bcache_writethrough(s->orig_bio);
		closure_bio_submit(bio, cl, s->d);
	} else {
		trace_bcache_writeback(s->orig_bio);
		bch_writeback_add(dc, bio_sectors(bio));
		s->op.cache_bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	}
out:
	closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
skip:
	s->op.skip = true;
	s->op.cache_bio = s->orig_bio;
	bio_get(s->op.cache_bio);
	trace_bcache_write_skip(s->orig_bio);

	if ((bio->bi_rw & REQ_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		goto out;

	closure_bio_submit(bio, cl, s->d);
	goto out;
}

static void request_nodata(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;

	if (bio->bi_rw & REQ_DISCARD) {
		request_write(dc, s);
		return;
	}

	if (s->op.flush_journal)
		bch_journal_meta(s->op.c, cl);

	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

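/*
 * Congestion and sequential-I/O heuristics used to decide whether a request
 * should bypass the cache (see check_should_skip() below).
 */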
int bch_get_congested(struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	return i <= 0 ? 1 : fract_exp_two(i, 6);
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static void check_should_skip(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->op.c;
	struct bio *bio = &s->bio.bio;

	long rand;
	int cutoff = bch_get_congested(c);
	unsigned mode = cache_mode(dc, bio);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!cutoff) {
		cutoff = dc->sequential_cutoff >> 9;

		if (!cutoff)
			goto rescale;

		if (mode == CACHE_MODE_WRITEBACK &&
		    (bio->bi_rw & REQ_WRITE) &&
		    (bio->bi_rw & REQ_SYNC))
			goto rescale;
	}

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			= bio_end(bio);
		i->jiffies		= jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	= i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	rand = get_random_int();
	cutoff -= bitmap_weight(&rand, BITS_PER_LONG);

	if (cutoff <= (int) (max(s->task->sequential_io,
				 s->task->sequential_io_avg) >> 9))
		goto skip;

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	s->op.skip = true;
}

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev	= dc->bdev;
	bio->bi_sector	+= dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio_has_data(bio))
			request_nodata(dc, s);
		else if (rw)
			request_write(dc, s);
		else
			request_read(dc, s);
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

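/* Flash backed devices */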
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
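	/* Zero fill bio - there's no cached data for this range */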
	while (bio->bi_idx != bio->bi_vcnt) {
		struct bio_vec *bv = bio_iovec(bio);
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		bv->bv_len	-= j << 9;
		bv->bv_offset	+= j << 9;

		if (bv->bv_len)
			return 0;

		bio->bi_sector	+= j;
		bio->bi_size	-= j << 9;

		bio->bi_idx++;
		sectors		-= j;
	}

	s->op.lookup_done = true;

	return 0;
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (bio_has_data(bio) && !rw) {
		closure_call(&s->op.cl, btree_read_async, NULL, cl);
	} else if (bio_has_data(bio) || s->op.skip) {
		bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
					     &KEY(d->id, bio->bi_sector, 0),
					     &KEY(d->id, bio_end(bio), 0));

		s->writeback	= true;
		s->op.cache_bio	= bio;

		closure_call(&s->op.cl, bch_insert_data, NULL, cl);
	} else {
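		/* No data - probably a cache flush */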
		if (s->op.flush_journal)
			bch_journal_meta(s->op.c, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}