// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}
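
/*
 * Note on bio_csum(): the accumulated crc64 is stored in the u64 slot
 * immediately after the last pointer in the key (k->ptr[KEY_PTRS(k)]),
 * with the top bit masked off by (~0ULL >> 1). kmap()/kunmap() are used
 * because the bio's pages may be highmem pages without a permanent
 * kernel mapping.
 */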

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu\n",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* get in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the
	 * pointers from the keys we'll accomplish just that.
	 */
	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */
	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/*
 * Congested?  Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}
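
/*
 * Rough sketch of the math above: the returned value is a sector threshold
 * consumed by check_should_bypass() - IOs whose recent sequential size is
 * at least this threshold get bypassed. The longer it has been since the
 * last congestion sample (the local_clock_us() delta, scaled down by 1024)
 * and the closer c->congested is to zero, the sooner we return 0 (not
 * congested). Otherwise the remaining budget below CONGESTED_MAX is scaled
 * roughly exponentially by fract_exp_two(i, 6), jittered by subtracting the
 * popcount of a random u32 (0-32), and clamped to at least 1 so congestion
 * always bypasses something.
 */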

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * If the bio is for read-ahead or background IO, whether to bypass it
	 * depends on the following:
	 * - If the IO is for metadata, always cache it, no bypass.
	 * - If the IO is not metadata, check dc->cache_readahead_policy:
	 *	BCH_CACHE_READA_ALL: cache it, don't bypass
	 *	BCH_CACHE_READA_META_ONLY: don't cache it, bypass
	 * That is, read-ahead requests for metadata always get cached
	 * (eg, for gfs2 or xfs).
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
			goto skip;
	}

	if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
		pr_debug("skipping unaligned io\n");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
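
/*
 * Summary of the sequential-IO heuristic above: dc->io_hash/io_lru track
 * the end sector of recently seen IOs for about 5 seconds. If a new bio
 * starts exactly where a tracked IO ended, the two are treated as one
 * sequential stream and their sizes accumulate in i->sequential; once the
 * accumulated size (or the task's EWMA of it) reaches dc->sequential_cutoff,
 * or the congestion threshold from bch_get_congested(), further IO from
 * that stream bypasses the cache.
 */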

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned int		insert_bio_sectors;
	unsigned int		recoverable:1;
	unsigned int		write:1;
	unsigned int		read_dirty_data:1;
	unsigned int		cache_missed:1;

	struct hd_struct	*part;
	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but don't count it against
	 * the cache device; we'll still reread the data from the backing
	 * device.
	 */
	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts
 * in the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */
	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might hit an error when searching the btree; if that happens we
	 * get a negative ret. In this scenario we should not recover data
	 * from the backing device (when the cache device is dirty) because we
	 * don't know whether the bkeys the read request covered are all
	 * clean.
	 *
	 * And after that happened, s->iop.status is still its initial value
	 * before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it is
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request which has REQ_PREFLUSH. We don't set
		 * s->iop.status for this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i\n",
				dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}

		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		/* Count on bcache device */
		part_end_io_acct(s->part, s->orig_bio, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in,
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}
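
/*
 * do_bio_hook() clones the original bio into the search's embedded bbio so
 * bcache can split and redirect it without touching the caller's bio;
 * end_io_fn is request_endio() for cache IO and backing_request_endio()
 * when the IO goes to the backing device. The initial reference count of 3
 * presumably covers the clone being reused for up to three submissions
 * (e.g. the read retry path) before it is torn down; the exact accounting
 * is taken as given here.
 */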

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	/* Count on the bcache device */
	s->start_time = part_start_io_acct(d->disk, &s->part, bio);
	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(cl);
}

/* Process reads */

static void cached_dev_read_error_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering the failed read from the backing device may return
	 * stale data. So read failure recovery is only permitted when the
	 * read request hit clean data in the cache device, or when a cache
	 * read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
	closure_put(&d->cl);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce
	 * buffers to the buffers the original bio pointed to:
	 */
	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
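
/*
 * Read completion fans out three ways: on error, attempt recovery from the
 * backing device (cached_dev_read_error); if there is a cache_bio to insert
 * or the device has verify enabled, finish on a workqueue
 * (cached_dev_read_done), since that path may block; otherwise complete
 * immediately.
 */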

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}
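
/*
 * Cache miss path in short: the missing range (possibly extended by up to
 * dc->readahead sectors, capped at the end of the device) is read from the
 * backing device into cache_bio's bounce pages, and s->iop.replace_key
 * reserves that range in the btree so a racing write makes the later insert
 * fail as a replace collision rather than clobbering newer data. If
 * allocating cache_bio fails, we just read from the backing device without
 * populating the cache.
 */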

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
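
/*
 * Writes take one of three paths above:
 * - bypass: the data goes only to the backing device, and bch_data_insert()
 *   is called with bypass set so the matching region of the cache is
 *   invalidated rather than populated.
 * - writeback: the data goes only to the cache and is marked dirty; if the
 *   original write carried REQ_PREFLUSH, a separate flush bio is sent to
 *   the backing device as well.
 * - writethrough (the final else): the bio is cloned, one copy goes to the
 *   backing device and the other is inserted into the cache as clean data.
 */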

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

struct detached_dev_io_private {
	struct bcache_device	*d;
	unsigned long		start_time;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
	struct hd_struct	*part;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	/* Count on the bcache device */
	part_end_io_acct(ddip->part, bio, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the upper
	 * layer has already opened the bcache device, which did the
	 * closure_get() for us.
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		/* kzalloc() can fail even with GFP_NOIO; fail the bio */
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}

	ddip->d = d;
	/* Count on the bcache device */
	ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		submit_bio_noacct(bio);
}
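
/*
 * Detached path: when the backing device is not attached to a cache set,
 * the bio is passed straight through. The original bi_end_io/bi_private are
 * stashed in ddip so detached_dev_end_io() can restore them, account the IO
 * against the bcache device and count backing-device errors before calling
 * the original completion.
 */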

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * bch_register_lock may be contended by other parallel requesters, or
	 * by attach/detach operations on other backing devices. Waiting for
	 * the mutex could add seconds or more to I/O request latency, so if
	 * mutex_trylock() fails, only the writeback rate of the current
	 * cached device is set to 1, and __update_writeback_rate() will
	 * decide the writeback rate of the other cached devices (remember
	 * c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * Set the writeback rate to the default minimum
			 * value, then let update_writeback_rate() decide the
			 * upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

blk_qc_t cached_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of the cache set is true and new
		 * I/O comes, quit the max writeback rate of all cached
		 * devices attached to this cache set, and set
		 * at_max_writeback_rate to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * submit_bio_noacct
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

blk_qc_t flash_dev_submit_bio(struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under submit_bio_noacct
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}