// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2012 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned int cache_mode(struct cached_dev *dc)
{
        return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
        return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *d = kmap(bv.bv_page) + bv.bv_offset;

                csum = bch_crc64_update(csum, d, bv.bv_len);
                kunmap(bv.bv_page);
        }

        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
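
/* Insert data into cache */

/*
 * bch_data_insert_keys() journals the keys accumulated so far (unless this is
 * a replace operation) and inserts them into the btree.  If not all of the
 * data has been written yet it loops back to bch_data_insert_start().
 */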
static void bch_data_insert_keys(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        atomic_t *journal_ref = NULL;
        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
        int ret;

        if (!op->replace)
                journal_ref = bch_journal(op->c, &op->insert_keys,
                                          op->flush_journal ? cl : NULL);

        ret = bch_btree_insert(op->c, &op->insert_keys,
                               journal_ref, replace_key);
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
                op->status = BLK_STS_RESOURCE;
                op->insert_data_done = true;
        }

        if (journal_ref)
                atomic_dec_bug(journal_ref);

        if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
                return;
        }

        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
                               struct cache_set *c)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;

        /*
         * The journalling code doesn't handle the case where the keys to
         * insert are bigger than an empty write: if we just return -ENOMEM
         * here, bch_data_insert_keys() will insert the keys created so far
         * and finish the rest when the keylist is empty.
         */
        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
                return -ENOMEM;

        return __bch_keylist_realloc(l, u64s);
}
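
/*
 * bch_data_invalidate() handles the bypass case: instead of writing the data
 * to the cache it emits keys without pointers that invalidate the region of
 * the cache device covered by op->bio.
 */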
static void bch_data_invalidate(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;

        pr_debug("invalidating %i sectors from %llu\n",
                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

        while (bio_sectors(bio)) {
                unsigned int sectors = min(bio_sectors(bio),
                                           1U << (KEY_SIZE_BITS - 1));

                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;

                bio->bi_iter.bi_sector += sectors;
                bio->bi_iter.bi_size -= sectors << 9;

                bch_keylist_add(&op->insert_keys,
                                &KEY(op->inode,
                                     bio->bi_iter.bi_sector,
                                     sectors));
        }

        op->insert_data_done = true;

        bio_put(bio);
out:
        continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        /*
         * Our data write just errored, which means we've got a bunch of keys
         * to insert that point to data that wasn't successfully written.
         *
         * We don't have to insert those keys but we still have to invalidate
         * that region of the cache - so, if we just strip off all the
         * pointers from the keys we'll accomplish just that.
         */
        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

        while (src != op->insert_keys.top) {
                struct bkey *n = bkey_next(src);

                SET_KEY_PTRS(src, 0);
                memmove(dst, src, bkey_bytes(src));

                dst = bkey_next(dst);
                src = n;
        }

        op->insert_keys.top = dst;

        bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        if (bio->bi_status) {
                if (op->writeback)
                        op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }

        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;

        if (op->bypass)
                return bch_data_invalidate(cl);

        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);

        /*
         * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll still be flushed along with the journal write.
         */
        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

        do {
                unsigned int i;
                struct bkey *k;
                struct bio_set *split = &op->c->bio_split;

                /* 1 for the device pointer and 1 for the chksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
                                        op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
                        return;
                }

                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;

                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

                n->bi_end_io = bch_data_insert_endio;
                n->bi_private = cl;

                if (op->writeback) {
                        SET_KEY_DIRTY(k, true);

                        for (i = 0; i < KEY_PTRS(k); i++)
                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
                                            GC_MARK_DIRTY);
                }

                SET_KEY_CSUM(k, op->csum);
                if (KEY_CSUM(k))
                        bio_csum(n, k);

                trace_bcache_cache_insert(k);
                bch_keylist_push(&op->insert_keys);

                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
                bch_submit_bbio(n, op->c, k, 0);
        } while (n != bio);

        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
        return;
err:
        /* bch_alloc_sectors() blocks if op->writeback is true */
        BUG_ON(op->writeback);

        /*
         * But if it's not a writeback write we'd rather just bail out if
         * there aren't any buckets ready to write to - it might take a while
         * and we might be starving btree writes for gc or something.
         */
        if (!op->replace) {
                /*
                 * Writethrough write: we can't complete the write until we've
                 * updated the index. But we don't want to delay the write
                 * while we wait for buckets to be freed up, so just
                 * invalidate the rest of the write.
                 */
                op->bypass = true;
                return bch_data_invalidate(cl);
        } else {
                /*
                 * From a cache miss, we can just insert the keys for the data
                 * we have written or bail out if we didn't write anything.
                 */
                op->insert_data_done = true;
                bio_put(bio);

                if (!bch_keylist_empty(&op->insert_keys))
                        continue_at(cl, bch_data_insert_keys, op->wq);
                else
                        closure_return(cl);
        }
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly-idle buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket there will be
 * multiple keys); once the data is written the keys are journalled and then
 * inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        trace_bcache_write(op->c, op->inode, op->bio,
                           op->writeback, op->bypass);

        bch_keylist_init(&op->insert_keys);
        bio_get(op->bio);
        bch_data_insert_start(cl);
}

/*
 * Congested?  Return 0 (not congested) or the limit (in sectors) beyond which
 * we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
        int i;

        if (!c->congested_read_threshold_us &&
            !c->congested_write_threshold_us)
                return 0;

        i = (local_clock_us() - c->congested_last_us) / 1024;
        if (i < 0)
                return 0;

        i += atomic_read(&c->congested);
        if (i >= 0)
                return 0;

        i += CONGESTED_MAX;

        if (i > 0)
                i = fract_exp_two(i, 6);

        i -= hweight32(get_random_u32());

        return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
        ewma_add(t->sequential_io_avg,
                 t->sequential_io, 8, 0);

        t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
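
/*
 * check_should_bypass() decides whether a request should skip the cache
 * entirely: discards, misaligned I/O, caches in the wrong mode, sufficiently
 * sequential streams and congested cache devices all cause a bypass.
 */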
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
        struct cache_set *c = dc->disk.c;
        unsigned int mode = cache_mode(dc);
        unsigned int sectors, congested;
        struct task_struct *task = current;
        struct io *i;

        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
            (bio_op(bio) == REQ_OP_DISCARD))
                goto skip;

        if (mode == CACHE_MODE_NONE ||
            (mode == CACHE_MODE_WRITEAROUND &&
             op_is_write(bio_op(bio))))
                goto skip;

        /*
         * If the bio is for read-ahead or background IO, it is optional IO:
         * only cache it if it is marked REQ_META or REQ_PRIO, or if the
         * cache_readahead_policy allows caching all read-ahead requests.
         */
        if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
                if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
                    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
                        goto skip;
        }

        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io\n");
                goto skip;
        }

        if (bypass_torture_test(dc)) {
                if ((get_random_int() & 3) == 3)
                        goto skip;
                else
                        goto rescale;
        }

        congested = bch_get_congested(c);
        if (!congested && !dc->sequential_cutoff)
                goto rescale;

        spin_lock(&dc->io_lock);

        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
                if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;

        i = list_first_entry(&dc->io_lru, struct io, lru);

        add_sequential(task);
        i->sequential = 0;
found:
        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
                i->sequential += bio->bi_iter.bi_size;

        i->last = bio_end_sector(bio);
        i->jiffies = jiffies + msecs_to_jiffies(5000);
        task->sequential_io = i->sequential;

        hlist_del(&i->hash);
        hlist_add_head(&i->hash, iohash(dc, i->last));
        list_move_tail(&i->lru, &dc->io_lru);

        spin_unlock(&dc->io_lock);

        sectors = max(task->sequential_io,
                      task->sequential_io_avg) >> 9;

        if (dc->sequential_cutoff &&
            sectors >= dc->sequential_cutoff >> 9) {
                trace_bcache_bypass_sequential(bio);
                goto skip;
        }

        if (congested && sectors >= congested) {
                trace_bcache_bypass_congested(bio);
                goto skip;
        }

rescale:
        bch_rescale_priorities(c, bio_sectors(bio));
        return false;
skip:
        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
        return true;
}
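
/* Cache lookup */
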
struct search {
        /* Stack frame for bio_complete */
        struct closure cl;

        struct bbio bio;
        struct bio *orig_bio;
        struct bio *cache_miss;
        struct bcache_device *d;

        unsigned int insert_bio_sectors;
        unsigned int recoverable:1;
        unsigned int write:1;
        unsigned int read_dirty_data:1;
        unsigned int cache_missed:1;

        unsigned long start_time;

        struct btree_op op;
        struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
        struct search *s = container_of(cl, struct search, cl);

        /*
         * If the bucket we read from was reused while our bio was in flight,
         * we might have read the wrong data.  Record the error on the search
         * (counted as a cache read race, not against the cache device) so the
         * data gets reread from the backing device.
         */
        if (bio->bi_status)
                s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.status = BLK_STS_IOERR;
        }

        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}
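
/*
 * Read from a single key, handling the initial cache miss if the key starts
 * in the middle of the bio
 */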
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct search *s = container_of(op, struct search, op);
        struct bio *n, *bio = &s->bio.bio;
        struct bkey *bio_key;
        unsigned int ptr;

        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;

        if (KEY_INODE(k) != s->iop.inode ||
            KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned int bio_sectors = bio_sectors(bio);
                unsigned int sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
                                KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;
                int ret = s->d->cache_miss(b, s, bio, sectors);

                if (ret != MAP_CONTINUE)
                        return ret;

                /* if this was a complete miss we shouldn't get here */
                BUG_ON(bio_sectors <= sectors);
        }

        if (!KEY_SIZE(k))
                return MAP_CONTINUE;

        /* XXX: figure out best pointer - for multiple cache devices */
        ptr = 0;

        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

        if (KEY_DIRTY(k))
                s->read_dirty_data = true;

        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
                           GFP_NOIO, &s->d->bio_split);

        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);

        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

        n->bi_end_io = bch_cache_read_endio;
        n->bi_private = &s->cl;

        /*
         * The bucket we're reading from might be reused while our bio is in
         * flight, and we could then end up reading the wrong data.
         *
         * We guard against this by checking (in bch_cache_read_endio()) if
         * the pointer is stale again; if so, we treat it as an error and
         * reread from the backing device (but we don't pass that error up
         * anywhere).
         */
        __bch_submit_bbio(n, b->c);
        return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
        struct cached_dev *dc;
        int ret;

        bch_btree_op_init(&s->op, -1);

        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
                return;
        }

        /*
         * A negative return means the btree lookup itself failed.  In that
         * case we must not recover the read from the backing device while the
         * cache holds dirty data, because we can't tell whether the bkeys
         * covering this request were all clean.
         *
         * Note that s->iop.status is still its initial value here; we haven't
         * submitted s->bio.bio yet.
         */
        if (ret < 0) {
                BUG_ON(ret == -EINTR);
                if (s->d && s->d->c &&
                    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
                        dc = container_of(s->d, struct cached_dev, disk);
                        if (dc && atomic_read(&dc->has_dirty))
                                s->recoverable = false;
                }
                if (!s->iop.status)
                        s->iop.status = BLK_STS_IOERR;
        }

        closure_return(cl);
}
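
/* Common code for the make_request functions */
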
static void request_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);

                s->iop.status = bio->bi_status;
                /* Only cache read error is recoverable */
                s->recoverable = false;
        }

        bio_put(bio);
        closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
                struct cached_dev *dc = container_of(s->d,
                                                     struct cached_dev, disk);
                /*
                 * A REQ_PREFLUSH bio in writeback mode was assembled by
                 * cached_dev_write() for a flush request; its failure does
                 * not set s->iop.status, which is decided by the result of
                 * the bch_data_insert() operation instead.
                 */
                if (unlikely(s->iop.writeback &&
                             bio->bi_opf & REQ_PREFLUSH)) {
                        pr_err("Can't flush %s: returned bi_status %i\n",
                               dc->backing_dev_name, bio->bi_status);
                } else {
                        /* set to orig_bio->bi_status in bio_complete() */
                        s->iop.status = bio->bi_status;
                }
                s->recoverable = false;

                /* should count I/O error for backing device here */
                bch_count_backing_io_errors(dc, bio);
        }

        bio_put(bio);
        closure_put(cl);
}

static void bio_complete(struct search *s)
{
        if (s->orig_bio) {
                /* Count on bcache device */
                bio_end_io_acct(s->orig_bio, s->start_time);
                trace_bcache_request_end(s->d, s->orig_bio);
                s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
}

static void do_bio_hook(struct search *s,
                        struct bio *orig_bio,
                        bio_end_io_t *end_io_fn)
{
        struct bio *bio = &s->bio.bio;

        bio_init(bio, NULL, 0);
        __bio_clone_fast(bio, orig_bio);
        /*
         * bi_end_io can be set separately somewhere else, e.g. the
         * variants in,
         * - cache_bio->bi_end_io from cached_dev_cache_miss()
         * - n->bi_end_io from cache_lookup_fn()
         */
        bio->bi_end_io = end_io_fn;
        bio->bi_private = &s->cl;

        bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        atomic_dec(&s->iop.c->search_inflight);

        if (s->iop.bio)
                bio_put(s->iop.bio);

        bio_complete(s);
        closure_debug_destroy(cl);
        mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
                                          struct bcache_device *d)
{
        struct search *s;

        s = mempool_alloc(&d->c->search, GFP_NOIO);

        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio, request_endio);
        atomic_inc(&d->c->search_inflight);

        s->orig_bio = bio;
        s->cache_miss = NULL;
        s->cache_missed = 0;
        s->d = d;
        s->recoverable = 1;
        s->write = op_is_write(bio_op(bio));
        s->read_dirty_data = 0;
        s->start_time = bio_start_io_acct(bio);

        s->iop.c = d->c;
        s->iop.bio = NULL;
        s->iop.inode = d->id;
        s->iop.write_point = hash_long((unsigned long) current, 16);
        s->iop.write_prio = 0;
        s->iop.status = 0;
        s->iop.flags = 0;
        s->iop.flush_journal = op_is_flush(bio->bi_opf);
        s->iop.wq = bcache_wq;

        return s;
}
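
/* Cached devices */
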
static void cached_dev_bio_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        cached_dev_put(dc);
        search_free(cl);
}
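
/* Process reads */
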
static void cached_dev_read_error_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);

        if (s->iop.bio)
                bio_free_pages(s->iop.bio);

        cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        /*
         * If the read request hit dirty data (s->read_dirty_data is true),
         * recovering the failed read from the cached device could return
         * stale data.  So recovery is only permitted when the request hit
         * clean data in the cache, or when a cache read race happened.
         */
        if (s->recoverable && !s->read_dirty_data) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);

                s->iop.status = 0;
                do_bio_hook(s, s->orig_bio, backing_request_endio);

                /* XXX: invalidate cache */

                /* I/O request sent to backing device */
                closure_bio_submit(s->iop.c, bio, cl);
        }

        continue_at(cl, cached_dev_read_error_done, NULL);
}

static void cached_dev_cache_miss_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bcache_device *d = s->d;

        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);

        if (s->iop.bio)
                bio_free_pages(s->iop.bio);

        cached_dev_bio_complete(cl);
        closure_put(&d->cl);
}

static void cached_dev_read_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        /*
         * We had a cache miss; s->iop.bio now contains the data read from the
         * backing device into its bounce pages.  Copy that data back to the
         * pages the original bio pointed to, then reset s->iop.bio so it can
         * be inserted into the cache.
         */
        if (s->iop.bio) {
                bio_reset(s->iop.bio);
                s->iop.bio->bi_iter.bi_sector =
                        s->cache_miss->bi_iter.bi_sector;
                bio_copy_dev(s->iop.bio, s->cache_miss);
                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);

                bio_copy_data(s->cache_miss, s->iop.bio);

                bio_put(s->cache_miss);
                s->cache_miss = NULL;
        }

        if (verify(dc) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);

        closure_get(&dc->disk.cl);
        bio_complete(s);

        if (s->iop.bio &&
            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                BUG_ON(!s->iop.replace);
                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        bch_mark_cache_accounting(s->iop.c, s->d,
                                  !s->cache_missed, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

        if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
        else
                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned int sectors)
{
        int ret = MAP_CONTINUE;
        unsigned int reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;

        s->cache_missed = 1;

        if (s->cache_miss || s->iop.bypass) {
                miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }

        if (!(bio->bi_opf & REQ_RAHEAD) &&
            !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
                              get_capacity(bio->bi_disk) - bio_end_sector(bio));

        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);

        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
        if (ret)
                return ret;

        s->iop.replace = true;

        miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;

        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
                        &dc->disk.bio_split);
        if (!cache_bio)
                goto out_submit;

        cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
        bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

        cache_bio->bi_end_io = backing_request_endio;
        cache_bio->bi_private = &s->cl;

        bch_bio_map(cache_bio, NULL);
        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;

        if (reada)
                bch_mark_cache_readahead(s->iop.c, s->d);

        s->cache_miss = miss;
        s->iop.bio = cache_bio;
        bio_get(cache_bio);
        /* I/O request sent to backing device */
        closure_bio_submit(s->iop.c, cache_bio, &s->cl);

        return ret;
out_put:
        bio_put(cache_bio);
out_submit:
        miss->bi_end_io = backing_request_endio;
        miss->bi_private = &s->cl;
        /* I/O request sent to backing device */
        closure_bio_submit(s->iop.c, miss, &s->cl);
        return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;

        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
}
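
/* Process writes */
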
static void cached_dev_write_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        up_read_non_owner(&dc->writeback_lock);
        cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

        down_read_non_owner(&dc->writeback_lock);
        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
                /*
                 * We overlap with some dirty data undergoing background
                 * writeback, force this write to writeback
                 */
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        /*
         * Discards aren't copied to the cache: bypassing them invalidates any
         * cached data for the discarded region instead of writing it.
         */
        if (bio_op(bio) == REQ_OP_DISCARD)
                s->iop.bypass = true;

        if (should_writeback(dc, s->orig_bio,
                             cache_mode(dc),
                             s->iop.bypass)) {
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        if (s->iop.bypass) {
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);

                if (bio_op(bio) == REQ_OP_DISCARD &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
                        goto insert_data;

                /* I/O request sent to backing device */
                bio->bi_end_io = backing_request_endio;
                closure_bio_submit(s->iop.c, bio, cl);

        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;

                if (bio->bi_opf & REQ_PREFLUSH) {
                        /*
                         * Also need to send a flush to the backing
                         * device.
                         */
                        struct bio *flush;

                        flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                 &dc->disk.bio_split);
                        if (!flush) {
                                s->iop.status = BLK_STS_RESOURCE;
                                goto insert_data;
                        }
                        bio_copy_dev(flush, bio);
                        flush->bi_end_io = backing_request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        /* I/O request sent to backing device */
                        closure_bio_submit(s->iop.c, flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
                /* I/O request sent to backing device */
                bio->bi_end_io = backing_request_endio;
                closure_bio_submit(s->iop.c, bio, cl);
        }

insert_data:
        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        /* If it's a flush, we send the flush to the backing device too */
        bio->bi_end_io = backing_request_endio;
        closure_bio_submit(s->iop.c, bio, cl);

        continue_at(cl, cached_dev_bio_complete, NULL);
}
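
/*
 * struct detached_dev_io_private carries the accounting state for I/O that is
 * passed straight through to the backing device while the bcache device is
 * not attached to a cache set.
 */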
struct detached_dev_io_private {
        struct bcache_device *d;
        unsigned long start_time;
        bio_end_io_t *bi_end_io;
        void *bi_private;
};

static void detached_dev_end_io(struct bio *bio)
{
        struct detached_dev_io_private *ddip;

        ddip = bio->bi_private;
        bio->bi_end_io = ddip->bi_end_io;
        bio->bi_private = ddip->bi_private;

        /* Count on the bcache device */
        bio_end_io_acct(bio, ddip->start_time);

        if (bio->bi_status) {
                struct cached_dev *dc = container_of(ddip->d,
                                                     struct cached_dev, disk);
                /* should count I/O error for backing device here */
                bch_count_backing_io_errors(dc, bio);
        }

        kfree(ddip);
        bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
        struct detached_dev_io_private *ddip;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);

        /*
         * No need to call closure_get(&dc->disk.cl) here: the upper layer
         * already opened the bcache device, which took that reference.
         */
        ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
        ddip->d = d;
        ddip->start_time = bio_start_io_acct(bio);
        ddip->bi_end_io = bio->bi_end_io;
        ddip->bi_private = bio->bi_private;
        bio->bi_end_io = detached_dev_end_io;
        bio->bi_private = ddip;

        if ((bio_op(bio) == REQ_OP_DISCARD) &&
            !blk_queue_discard(bdev_get_queue(dc->bdev)))
                bio->bi_end_io(bio);
        else
                generic_make_request(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
                                    struct cached_dev *this_dc)
{
        int i;
        struct bcache_device *d;
        struct cached_dev *dc;

        /*
         * bch_register_lock may be held by other parallel requesters, or by
         * attach/detach operations on another backing device; waiting on it
         * could add seconds of latency to this request.  So if
         * mutex_trylock() fails, only the writeback rate of the current
         * cached device is set to 1, and update_writeback_rate() will take
         * care of the other cached devices.
         */
        if (mutex_trylock(&bch_register_lock)) {
                for (i = 0; i < c->devices_max_used; i++) {
                        if (!c->devices[i])
                                continue;

                        if (UUID_FLASH_ONLY(&c->uuids[i]))
                                continue;

                        d = c->devices[i];
                        dc = container_of(d, struct cached_dev, disk);
                        /*
                         * Set the writeback rate to the minimum value; let
                         * update_writeback_rate() decide the upcoming rate.
                         */
                        atomic_long_set(&dc->writeback_rate.rate, 1);
                }
                mutex_unlock(&bch_register_lock);
        } else
                atomic_long_set(&this_dc->writeback_rate.rate, 1);
}
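
/* Cached devices - read & write stuff */
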
blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
        struct search *s;
        struct bcache_device *d = bio->bi_disk->private_data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);

        if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
                     dc->io_disable)) {
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }

        if (likely(d->c)) {
                if (atomic_read(&d->c->idle_counter))
                        atomic_set(&d->c->idle_counter, 0);
                /*
                 * If at_max_writeback_rate of the cache set is true and new
                 * I/O comes, quit the max writeback rate of all cached
                 * devices attached to this cache set, and set
                 * at_max_writeback_rate to false.
                 */
                if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
                        atomic_set(&d->c->at_max_writeback_rate, 0);
                        quit_max_writeback_rate(d->c, dc);
                }
        }

        bio_set_dev(bio, dc->bdev);
        bio->bi_iter.bi_sector += dc->sb.data_offset;

        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);

                if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
                         */
                        continue_at_nobarrier(&s->cl,
                                              cached_dev_nodata,
                                              bcache_wq);
                } else {
                        s->iop.bypass = check_should_bypass(dc, bio);

                        if (rw)
                                cached_dev_write(dc, s);
                        else
                                cached_dev_read(dc, s);
                }
        } else
                /* I/O request sent to backing device */
                detached_dev_do_request(d, bio);

        return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);

        if (dc->io_disable)
                return -EIO;

        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        struct request_queue *q = bdev_get_queue(dc->bdev);
        int ret = 0;

        if (bdi_congested(q->backing_dev_info, bits))
                return 1;

        if (cached_dev_get(dc)) {
                unsigned int i;
                struct cache *ca;

                for_each_cache(ca, d->c, i) {
                        q = bdev_get_queue(ca->bdev);
                        ret |= bdi_congested(q->backing_dev_info, bits);
                }

                cached_dev_put(dc);
        }

        return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
        struct gendisk *g = dc->disk.disk;

        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
        dc->disk.cache_miss = cached_dev_cache_miss;
        dc->disk.ioctl = cached_dev_ioctl;
}
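
/* Flash backed devices */
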
static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned int sectors)
{
        unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

        swap(bio->bi_iter.bi_size, bytes);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, bytes);

        bio_advance(bio, bytes);

        if (!bio->bi_iter.bi_size)
                return MAP_DONE;

        return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        continue_at(cl, search_free, NULL);
}

blk_qc_t flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
        struct search *s;
        struct closure *cl;
        struct bcache_device *d = bio->bi_disk->private_data;

        if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }

        s = search_alloc(bio, d);
        cl = &s->cl;
        bio = &s->bio.bio;

        trace_bcache_request_start(s->d, bio);

        if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
                 */
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
                return BLK_QC_T_NONE;
        } else if (bio_data_dir(bio)) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                &KEY(d->id, bio_end_sector(bio), 0));

                s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
                s->iop.writeback = true;
                s->iop.bio = bio;

                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        } else {
                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        }

        continue_at(cl, search_free, NULL);
        return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct request_queue *q;
        struct cache *ca;
        unsigned int i;
        int ret = 0;

        for_each_cache(ca, d->c, i) {
                q = bdev_get_queue(ca->bdev);
                ret |= bdi_congested(q->backing_dev_info, bits);
        }

        return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
        struct gendisk *g = d->disk;

        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
        d->cache_miss = flash_dev_cache_miss;
        d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
        kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
        bch_search_cache = KMEM_CACHE(search, 0);
        if (!bch_search_cache)
                return -ENOMEM;

        return 0;
}