// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

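/*
 * Compute a 64-bit CRC over every data segment in the bio and stash it in
 * the key's spare pointer slot; the top bit is reserved, hence the mask.
 */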
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The keys for one insert must fit in a single journal write: bail
	 * out with -ENOMEM if growing the keylist would exceed what a journal
	 * entry (one block minus the jset header) can hold.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

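/*
 * Bypass path: instead of writing data into the cache, emit zero-pointer
 * keys covering the bio's range so that any stale cached data for that
 * region is invalidated.
 */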
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;

	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

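/*
 * Completion handler for data writes to the cache device. On error: a
 * writeback write records the error, a normal write is diverted to
 * bch_data_insert_error(), and a cache-miss replace simply gives up.
 */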
static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just invalidate
		 * the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly-already-sorted order.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset, and
 * op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

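/*
 * Fold the task's just-finished sequential run into its exponentially
 * weighted moving average, then reset the running counter.
 */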
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

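/*
 * Decide whether this I/O should bypass the cache entirely: bypass when the
 * device is detaching, the cache is nearly full, the request is a discard,
 * the cache mode forbids caching it, it is misaligned, it looks sequential
 * past the cutoff, or the cache device is congested.
 */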
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata (eg, for gfs2).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & REQ_META))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;
	unsigned		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but don't count it against
	 * the cache device; we'll still reread the data from the backing
	 * device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

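/*
 * Walk the extent btree over the range covered by the request, calling
 * cache_lookup_fn for each key; reschedules itself if the lookup needs to
 * block (-EAGAIN), and decides whether a failed lookup is recoverable from
 * the backing device.
 */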
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * The lookup can fail for reasons other than a simple cache miss
	 * (e.g. btree I/O errors). If the error is fatal and the cached
	 * device still has dirty data in the cache, the read cannot safely
	 * be retried from the backing device, so mark it unrecoverable;
	 * otherwise leave s->recoverable set so cached_dev_read_error()
	 * can retry from the backing device.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.status = bio->bi_status;
		/* Only cache read error is recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);

		/*
		 * A failed PREFLUSH that was generated for a writeback write
		 * only means the backing device could not flush its own
		 * cache; log it but don't fail the original request. Any
		 * other backing device error is passed up via s->iop.status.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i",
			       dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

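/*
 * Finish I/O accounting for the original bio and complete it with the
 * status accumulated in the insert/lookup operation.
 */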
static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue,
				    bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->d->c->search);
}

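/*
 * Allocate a search from the cache set's mempool and initialize it for the
 * given bio and bcache device; the embedded closure and cloned bio are set
 * up so the request can be driven asynchronously from here on.
 */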
static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * retrying it from the backing device could return stale data, so
	 * read failure recovery is only permitted when the request hit
	 * clean data in the cache, or when a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; s->iop.bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from the bounce buffers to
	 * the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

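/*
 * Handle the portion of a read that wasn't found in the cache: optionally
 * extend it with readahead, reserve the range with a replace key so the
 * later cache insert is dropped if something else writes that range first,
 * read it from the backing device, and (if a bounce bio could be allocated)
 * insert the result into the cache.
 */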
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

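/*
 * Decide how a write reaches the backing device and the cache: bypass
 * (backing device only), writeback (cache only, flushed back later), or
 * writethrough (both). A write that overlaps dirty data currently being
 * written back must go to the cache so the backing device never sees
 * stale data.
 */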
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, in which case we can't skip the cache or the
		 * backing device could end up with stale data.
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

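/*
 * When the backing device is not attached to a cache set, requests are
 * passed straight through; this stashes the original endio and private
 * pointers plus the accounting start time so they can be restored in
 * detached_dev_end_io().
 */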
struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue,
			    bio_data_dir(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * no need to call closure_get(&dc->disk.cl),
	 * because upper layer had already opened bcache device,
	 * which would call closure_get(&dc->disk.cl)
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	ddip->d = d;
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	atomic_set(&dc->backing_idle, 0);
	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

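/*
 * A flash-only volume has no backing device, so a "cache miss" just means
 * the range was never written: zero-fill that part of the bio and move on.
 */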
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;
	int rw = bio_data_dir(bio);

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}