linux/drivers/md/bcache/request.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Main bcache entry point - handle a read or a write request and decide what to
   4 * do with it; the make_request functions are called by the block layer.
   5 *
   6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7 * Copyright 2012 Google, Inc.
   8 */
   9
  10#include "bcache.h"
  11#include "btree.h"
  12#include "debug.h"
  13#include "request.h"
  14#include "writeback.h"
  15
  16#include <linux/module.h>
  17#include <linux/hash.h>
  18#include <linux/random.h>
  19#include <linux/backing-dev.h>
  20
  21#include <trace/events/bcache.h>
  22
  23#define CUTOFF_CACHE_ADD        95
  24#define CUTOFF_CACHE_READA      90
  25
  26struct kmem_cache *bch_search_cache;
  27
  28static void bch_data_insert_start(struct closure *cl);
  29
  30static unsigned int cache_mode(struct cached_dev *dc)
  31{
  32        return BDEV_CACHE_MODE(&dc->sb);
  33}
  34
  35static bool verify(struct cached_dev *dc)
  36{
  37        return dc->verify;
  38}
  39
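/*
 * Compute a 64-bit checksum over the bio's data and stash it in the key's
 * slot just past its pointers, with the top bit cleared.
 */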
  40static void bio_csum(struct bio *bio, struct bkey *k)
  41{
  42        struct bio_vec bv;
  43        struct bvec_iter iter;
  44        uint64_t csum = 0;
  45
  46        bio_for_each_segment(bv, bio, iter) {
  47                void *d = kmap(bv.bv_page) + bv.bv_offset;
  48
  49                csum = bch_crc64_update(csum, d, bv.bv_len);
  50                kunmap(bv.bv_page);
  51        }
  52
  53        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
  54}
  55
  56/* Insert data into cache */
  57
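/*
 * Insert the keys built up in op->insert_keys into the btree, journalling
 * them first unless this is a replace (cache miss) insert. If more data
 * remains to be written, control returns to bch_data_insert_start(); a btree
 * insert failure marks the insert done with BLK_STS_RESOURCE.
 */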
  58static void bch_data_insert_keys(struct closure *cl)
  59{
  60        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
  61        atomic_t *journal_ref = NULL;
  62        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
  63        int ret;
  64
  65        if (!op->replace)
  66                journal_ref = bch_journal(op->c, &op->insert_keys,
  67                                          op->flush_journal ? cl : NULL);
  68
  69        ret = bch_btree_insert(op->c, &op->insert_keys,
  70                               journal_ref, replace_key);
  71        if (ret == -ESRCH) {
  72                op->replace_collision = true;
  73        } else if (ret) {
  74                op->status              = BLK_STS_RESOURCE;
  75                op->insert_data_done    = true;
  76        }
  77
  78        if (journal_ref)
  79                atomic_dec_bug(journal_ref);
  80
  81        if (!op->insert_data_done) {
  82                continue_at(cl, bch_data_insert_start, op->wq);
  83                return;
  84        }
  85
  86        bch_keylist_free(&op->insert_keys);
  87        closure_return(cl);
  88}
  89
  90static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
  91                               struct cache_set *c)
  92{
  93        size_t oldsize = bch_keylist_nkeys(l);
  94        size_t newsize = oldsize + u64s;
  95
  96        /*
  97         * The journalling code doesn't handle the case where the keys to insert
   98         * are bigger than an empty write: If we just return -ENOMEM here,
  99         * bch_data_insert_keys() will insert the keys created so far
 100         * and finish the rest when the keylist is empty.
 101         */
 102        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
 103                return -ENOMEM;
 104
 105        return __bch_keylist_realloc(l, u64s);
 106}
 107
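/*
 * Bypass path: rather than writing the data, add keys with no pointers that
 * invalidate the region of the cache covered by the bio, then continue to
 * bch_data_insert_keys().
 */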
 108static void bch_data_invalidate(struct closure *cl)
 109{
 110        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 111        struct bio *bio = op->bio;
 112
 113        pr_debug("invalidating %i sectors from %llu",
 114                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 115
 116        while (bio_sectors(bio)) {
 117                unsigned int sectors = min(bio_sectors(bio),
 118                                       1U << (KEY_SIZE_BITS - 1));
 119
 120                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 121                        goto out;
 122
 123                bio->bi_iter.bi_sector  += sectors;
 124                bio->bi_iter.bi_size    -= sectors << 9;
 125
 126                bch_keylist_add(&op->insert_keys,
 127                                &KEY(op->inode,
 128                                     bio->bi_iter.bi_sector,
 129                                     sectors));
 130        }
 131
 132        op->insert_data_done = true;
  133        /* drop the bio reference taken in bch_data_insert() */
 134        bio_put(bio);
 135out:
 136        continue_at(cl, bch_data_insert_keys, op->wq);
 137}
 138
 139static void bch_data_insert_error(struct closure *cl)
 140{
 141        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 142
 143        /*
 144         * Our data write just errored, which means we've got a bunch of keys to
 145         * insert that point to data that wasn't successfully written.
 146         *
 147         * We don't have to insert those keys but we still have to invalidate
 148         * that region of the cache - so, if we just strip off all the pointers
 149         * from the keys we'll accomplish just that.
 150         */
 151
 152        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 153
 154        while (src != op->insert_keys.top) {
 155                struct bkey *n = bkey_next(src);
 156
 157                SET_KEY_PTRS(src, 0);
 158                memmove(dst, src, bkey_bytes(src));
 159
 160                dst = bkey_next(dst);
 161                src = n;
 162        }
 163
 164        op->insert_keys.top = dst;
 165
 166        bch_data_insert_keys(cl);
 167}
 168
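/*
 * Completion for a data write to the cache. On error: record the status for
 * writeback writes, fall back to bch_data_insert_error() for ordinary writes,
 * or drop the insert entirely for replace (cache miss) writes.
 */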
 169static void bch_data_insert_endio(struct bio *bio)
 170{
 171        struct closure *cl = bio->bi_private;
 172        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 173
 174        if (bio->bi_status) {
 175                /* TODO: We could try to recover from this. */
 176                if (op->writeback)
 177                        op->status = bio->bi_status;
 178                else if (!op->replace)
 179                        set_closure_fn(cl, bch_data_insert_error, op->wq);
 180                else
 181                        set_closure_fn(cl, NULL, NULL);
 182        }
 183
 184        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 185}
 186
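/*
 * Allocate buckets and write op->bio to the cache, splitting the bio and
 * building one key per allocation in op->insert_keys; hands off to
 * bch_data_invalidate() when op->bypass is set.
 */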
 187static void bch_data_insert_start(struct closure *cl)
 188{
 189        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 190        struct bio *bio = op->bio, *n;
 191
 192        if (op->bypass)
 193                return bch_data_invalidate(cl);
 194
 195        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 196                wake_up_gc(op->c);
 197
 198        /*
 199         * Journal writes are marked REQ_PREFLUSH; if the original write was a
 200         * flush, it'll wait on the journal write.
 201         */
 202        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 203
 204        do {
 205                unsigned int i;
 206                struct bkey *k;
 207                struct bio_set *split = &op->c->bio_split;
 208
 209                /* 1 for the device pointer and 1 for the chksum */
 210                if (bch_keylist_realloc(&op->insert_keys,
 211                                        3 + (op->csum ? 1 : 0),
 212                                        op->c)) {
 213                        continue_at(cl, bch_data_insert_keys, op->wq);
 214                        return;
 215                }
 216
 217                k = op->insert_keys.top;
 218                bkey_init(k);
 219                SET_KEY_INODE(k, op->inode);
 220                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 221
 222                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 223                                       op->write_point, op->write_prio,
 224                                       op->writeback))
 225                        goto err;
 226
 227                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 228
 229                n->bi_end_io    = bch_data_insert_endio;
 230                n->bi_private   = cl;
 231
 232                if (op->writeback) {
 233                        SET_KEY_DIRTY(k, true);
 234
 235                        for (i = 0; i < KEY_PTRS(k); i++)
 236                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
 237                                            GC_MARK_DIRTY);
 238                }
 239
 240                SET_KEY_CSUM(k, op->csum);
 241                if (KEY_CSUM(k))
 242                        bio_csum(n, k);
 243
 244                trace_bcache_cache_insert(k);
 245                bch_keylist_push(&op->insert_keys);
 246
 247                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
 248                bch_submit_bbio(n, op->c, k, 0);
 249        } while (n != bio);
 250
 251        op->insert_data_done = true;
 252        continue_at(cl, bch_data_insert_keys, op->wq);
 253        return;
 254err:
  255        /* bch_alloc_sectors() blocks if op->writeback is true */
 256        BUG_ON(op->writeback);
 257
 258        /*
 259         * But if it's not a writeback write we'd rather just bail out if
  260         * there aren't any buckets ready to write to - it might take a while and
 261         * we might be starving btree writes for gc or something.
 262         */
 263
 264        if (!op->replace) {
 265                /*
 266                 * Writethrough write: We can't complete the write until we've
 267                 * updated the index. But we don't want to delay the write while
 268                 * we wait for buckets to be freed up, so just invalidate the
 269                 * rest of the write.
 270                 */
 271                op->bypass = true;
 272                return bch_data_invalidate(cl);
 273        } else {
 274                /*
 275                 * From a cache miss, we can just insert the keys for the data
 276                 * we have written or bail out if we didn't do anything.
 277                 */
 278                op->insert_data_done = true;
 279                bio_put(bio);
 280
 281                if (!bch_keylist_empty(&op->insert_keys))
 282                        continue_at(cl, bch_data_insert_keys, op->wq);
 283                else
 284                        closure_return(cl);
 285        }
 286}
 287
 288/**
 289 * bch_data_insert - stick some data in the cache
 290 * @cl: closure pointer.
 291 *
 292 * This is the starting point for any data to end up in a cache device; it could
 293 * be from a normal write, or a writeback write, or a write to a flash only
 294 * volume - it's also used by the moving garbage collector to compact data in
 295 * mostly empty buckets.
 296 *
 297 * It first writes the data to the cache, creating a list of keys to be inserted
 298 * (if the data had to be fragmented there will be multiple keys); after the
 299 * data is written it calls bch_journal, and after the keys have been added to
 300 * the next journal write they're inserted into the btree.
 301 *
 302 * It inserts the data in op->bio; bi_sector is used for the key offset,
 303 * and op->inode is used for the key inode.
 304 *
 305 * If op->bypass is true, instead of inserting the data it invalidates the
 306 * region of the cache represented by op->bio and op->inode.
 307 */
 308void bch_data_insert(struct closure *cl)
 309{
 310        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 311
 312        trace_bcache_write(op->c, op->inode, op->bio,
 313                           op->writeback, op->bypass);
 314
 315        bch_keylist_init(&op->insert_keys);
 316        bio_get(op->bio);
 317        bch_data_insert_start(cl);
 318}
 319
 320/*
 321 * Congested?  Return 0 (not congested) or the limit (in sectors)
 322 * beyond which we should bypass the cache due to congestion.
 323 */
 324unsigned int bch_get_congested(const struct cache_set *c)
 325{
 326        int i;
 327
 328        if (!c->congested_read_threshold_us &&
 329            !c->congested_write_threshold_us)
 330                return 0;
 331
 332        i = (local_clock_us() - c->congested_last_us) / 1024;
 333        if (i < 0)
 334                return 0;
 335
 336        i += atomic_read(&c->congested);
 337        if (i >= 0)
 338                return 0;
 339
 340        i += CONGESTED_MAX;
 341
 342        if (i > 0)
 343                i = fract_exp_two(i, 6);
 344
 345        i -= hweight32(get_random_u32());
 346
 347        return i > 0 ? i : 1;
 348}
 349
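/* Fold the task's completed sequential I/O into its EWMA and reset the counter. */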
 350static void add_sequential(struct task_struct *t)
 351{
 352        ewma_add(t->sequential_io_avg,
 353                 t->sequential_io, 8, 0);
 354
 355        t->sequential_io = 0;
 356}
 357
 358static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 359{
 360        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 361}
 362
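/*
 * Decide whether a bio should bypass the cache, based on cache mode, cache
 * utilization, request alignment, congestion and the per-task sequential I/O
 * heuristic.
 */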
 363static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 364{
 365        struct cache_set *c = dc->disk.c;
 366        unsigned int mode = cache_mode(dc);
 367        unsigned int sectors, congested;
 368        struct task_struct *task = current;
 369        struct io *i;
 370
 371        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 372            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 373            (bio_op(bio) == REQ_OP_DISCARD))
 374                goto skip;
 375
 376        if (mode == CACHE_MODE_NONE ||
 377            (mode == CACHE_MODE_WRITEAROUND &&
 378             op_is_write(bio_op(bio))))
 379                goto skip;
 380
  381        /*
  382         * If the bio is for read-ahead or background IO, whether to bypass
  383         * it depends on the following cases:
  384         * - If the IO is for metadata, always cache it, no bypass
  385         * - If the IO is not metadata, check dc->cache_readahead_policy:
  386         *      BCH_CACHE_READA_ALL: cache it, do not bypass
  387         *      BCH_CACHE_READA_META_ONLY: do not cache it, bypass
  388         * That is, read-ahead requests for metadata always get cached
  389         * (e.g. for gfs2 or xfs).
  390         */
 391        if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
 392                if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 393                    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
 394                        goto skip;
 395        }
 396
 397        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 398            bio_sectors(bio) & (c->sb.block_size - 1)) {
 399                pr_debug("skipping unaligned io");
 400                goto skip;
 401        }
 402
 403        if (bypass_torture_test(dc)) {
 404                if ((get_random_int() & 3) == 3)
 405                        goto skip;
 406                else
 407                        goto rescale;
 408        }
 409
 410        congested = bch_get_congested(c);
 411        if (!congested && !dc->sequential_cutoff)
 412                goto rescale;
 413
 414        spin_lock(&dc->io_lock);
 415
 416        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
 417                if (i->last == bio->bi_iter.bi_sector &&
 418                    time_before(jiffies, i->jiffies))
 419                        goto found;
 420
 421        i = list_first_entry(&dc->io_lru, struct io, lru);
 422
 423        add_sequential(task);
 424        i->sequential = 0;
 425found:
 426        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
 427                i->sequential   += bio->bi_iter.bi_size;
 428
 429        i->last                  = bio_end_sector(bio);
 430        i->jiffies               = jiffies + msecs_to_jiffies(5000);
 431        task->sequential_io      = i->sequential;
 432
 433        hlist_del(&i->hash);
 434        hlist_add_head(&i->hash, iohash(dc, i->last));
 435        list_move_tail(&i->lru, &dc->io_lru);
 436
 437        spin_unlock(&dc->io_lock);
 438
 439        sectors = max(task->sequential_io,
 440                      task->sequential_io_avg) >> 9;
 441
 442        if (dc->sequential_cutoff &&
 443            sectors >= dc->sequential_cutoff >> 9) {
 444                trace_bcache_bypass_sequential(bio);
 445                goto skip;
 446        }
 447
 448        if (congested && sectors >= congested) {
 449                trace_bcache_bypass_congested(bio);
 450                goto skip;
 451        }
 452
 453rescale:
 454        bch_rescale_priorities(c, bio_sectors(bio));
 455        return false;
 456skip:
 457        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
 458        return true;
 459}
 460
 461/* Cache lookup */
 462
 463struct search {
 464        /* Stack frame for bio_complete */
 465        struct closure          cl;
 466
 467        struct bbio             bio;
 468        struct bio              *orig_bio;
 469        struct bio              *cache_miss;
 470        struct bcache_device    *d;
 471
 472        unsigned int            insert_bio_sectors;
 473        unsigned int            recoverable:1;
 474        unsigned int            write:1;
 475        unsigned int            read_dirty_data:1;
 476        unsigned int            cache_missed:1;
 477
 478        unsigned long           start_time;
 479
 480        struct btree_op         op;
 481        struct data_insert_op   iop;
 482};
 483
 484static void bch_cache_read_endio(struct bio *bio)
 485{
 486        struct bbio *b = container_of(bio, struct bbio, bio);
 487        struct closure *cl = bio->bi_private;
 488        struct search *s = container_of(cl, struct search, cl);
 489
 490        /*
 491         * If the bucket was reused while our bio was in flight, we might have
  492         * read the wrong data. Set s->iop.status, but don't report an error
  493         * on the cache bio itself so it doesn't get counted against the cache
  494         * device; we'll still reread the data from the backing device.
 495         */
 496
 497        if (bio->bi_status)
 498                s->iop.status = bio->bi_status;
 499        else if (!KEY_DIRTY(&b->key) &&
 500                 ptr_stale(s->iop.c, &b->key, 0)) {
 501                atomic_long_inc(&s->iop.c->cache_read_races);
 502                s->iop.status = BLK_STS_IOERR;
 503        }
 504
 505        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 506}
 507
 508/*
 509 * Read from a single key, handling the initial cache miss if the key starts in
 510 * the middle of the bio
 511 */
 512static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 513{
 514        struct search *s = container_of(op, struct search, op);
 515        struct bio *n, *bio = &s->bio.bio;
 516        struct bkey *bio_key;
 517        unsigned int ptr;
 518
 519        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 520                return MAP_CONTINUE;
 521
 522        if (KEY_INODE(k) != s->iop.inode ||
 523            KEY_START(k) > bio->bi_iter.bi_sector) {
 524                unsigned int bio_sectors = bio_sectors(bio);
 525                unsigned int sectors = KEY_INODE(k) == s->iop.inode
 526                        ? min_t(uint64_t, INT_MAX,
 527                                KEY_START(k) - bio->bi_iter.bi_sector)
 528                        : INT_MAX;
 529                int ret = s->d->cache_miss(b, s, bio, sectors);
 530
 531                if (ret != MAP_CONTINUE)
 532                        return ret;
 533
 534                /* if this was a complete miss we shouldn't get here */
 535                BUG_ON(bio_sectors <= sectors);
 536        }
 537
 538        if (!KEY_SIZE(k))
 539                return MAP_CONTINUE;
 540
 541        /* XXX: figure out best pointer - for multiple cache devices */
 542        ptr = 0;
 543
 544        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 545
 546        if (KEY_DIRTY(k))
 547                s->read_dirty_data = true;
 548
 549        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 550                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 551                           GFP_NOIO, &s->d->bio_split);
 552
 553        bio_key = &container_of(n, struct bbio, bio)->key;
 554        bch_bkey_copy_single_ptr(bio_key, k, ptr);
 555
 556        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 557        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 558
 559        n->bi_end_io    = bch_cache_read_endio;
 560        n->bi_private   = &s->cl;
 561
 562        /*
 563         * The bucket we're reading from might be reused while our bio
 564         * is in flight, and we could then end up reading the wrong
 565         * data.
 566         *
  567         * We guard against this by checking (in bch_cache_read_endio()) if
 568         * the pointer is stale again; if so, we treat it as an error
 569         * and reread from the backing device (but we don't pass that
 570         * error up anywhere).
 571         */
 572
 573        __bch_submit_bbio(n, b->c);
 574        return n == bio ? MAP_DONE : MAP_CONTINUE;
 575}
 576
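/*
 * Walk the btree for the keys covering this request, submitting cache reads
 * and handling cache misses via cache_lookup_fn(); requeued and retried from
 * the workqueue if bch_btree_map_keys() returns -EAGAIN.
 */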
 577static void cache_lookup(struct closure *cl)
 578{
 579        struct search *s = container_of(cl, struct search, iop.cl);
 580        struct bio *bio = &s->bio.bio;
 581        struct cached_dev *dc;
 582        int ret;
 583
 584        bch_btree_op_init(&s->op, -1);
 585
 586        ret = bch_btree_map_keys(&s->op, s->iop.c,
 587                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 588                                 cache_lookup_fn, MAP_END_KEY);
 589        if (ret == -EAGAIN) {
 590                continue_at(cl, cache_lookup, bcache_wq);
 591                return;
 592        }
 593
 594        /*
  595         * We might hit an error when searching the btree. If that happens we
  596         * get a negative ret; in that case we should not recover data from the
  597         * backing device (when the cache device is dirty) because we don't
  598         * know whether the bkeys covered by the read request are all clean.
  599         *
  600         * And if that happened, s->iop.status is still its initial value from
  601         * before we submitted s->bio.bio.
 602         */
 603        if (ret < 0) {
 604                BUG_ON(ret == -EINTR);
 605                if (s->d && s->d->c &&
 606                                !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
 607                        dc = container_of(s->d, struct cached_dev, disk);
 608                        if (dc && atomic_read(&dc->has_dirty))
 609                                s->recoverable = false;
 610                }
 611                if (!s->iop.status)
 612                        s->iop.status = BLK_STS_IOERR;
 613        }
 614
 615        closure_return(cl);
 616}
 617
 618/* Common code for the make_request functions */
 619
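/*
 * Completion for bios submitted on behalf of a search: an error is recorded
 * in s->iop.status and marks the request unrecoverable.
 */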
 620static void request_endio(struct bio *bio)
 621{
 622        struct closure *cl = bio->bi_private;
 623
 624        if (bio->bi_status) {
 625                struct search *s = container_of(cl, struct search, cl);
 626
 627                s->iop.status = bio->bi_status;
 628                /* Only cache read errors are recoverable */
 629                s->recoverable = false;
 630        }
 631
 632        bio_put(bio);
 633        closure_put(cl);
 634}
 635
 636static void backing_request_endio(struct bio *bio)
 637{
 638        struct closure *cl = bio->bi_private;
 639
 640        if (bio->bi_status) {
 641                struct search *s = container_of(cl, struct search, cl);
 642                struct cached_dev *dc = container_of(s->d,
 643                                                     struct cached_dev, disk);
 644                /*
 645                 * If a bio has REQ_PREFLUSH for writeback mode, it is
  646                 * specially assembled in cached_dev_write() for a non-zero
  647                 * write request which has REQ_PREFLUSH. We don't set
  648                 * s->iop.status for this failure; the status will be decided
  649                 * by the result of the bch_data_insert() operation.
 650                 */
 651                if (unlikely(s->iop.writeback &&
 652                             bio->bi_opf & REQ_PREFLUSH)) {
 653                        pr_err("Can't flush %s: returned bi_status %i",
 654                                dc->backing_dev_name, bio->bi_status);
 655                } else {
 656                        /* set to orig_bio->bi_status in bio_complete() */
 657                        s->iop.status = bio->bi_status;
 658                }
 659                s->recoverable = false;
 660                /* should count I/O error for backing device here */
 661                bch_count_backing_io_errors(dc, bio);
 662        }
 663
 664        bio_put(bio);
 665        closure_put(cl);
 666}
 667
 668static void bio_complete(struct search *s)
 669{
 670        if (s->orig_bio) {
 671                generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 672                                    &s->d->disk->part0, s->start_time);
 673
 674                trace_bcache_request_end(s->d, s->orig_bio);
 675                s->orig_bio->bi_status = s->iop.status;
 676                bio_endio(s->orig_bio);
 677                s->orig_bio = NULL;
 678        }
 679}
 680
 681static void do_bio_hook(struct search *s,
 682                        struct bio *orig_bio,
 683                        bio_end_io_t *end_io_fn)
 684{
 685        struct bio *bio = &s->bio.bio;
 686
 687        bio_init(bio, NULL, 0);
 688        __bio_clone_fast(bio, orig_bio);
 689        /*
 690         * bi_end_io can be set separately somewhere else, e.g. the
 691         * variants in,
 692         * - cache_bio->bi_end_io from cached_dev_cache_miss()
 693         * - n->bi_end_io from cache_lookup_fn()
 694         */
 695        bio->bi_end_io          = end_io_fn;
 696        bio->bi_private         = &s->cl;
 697
 698        bio_cnt_set(bio, 3);
 699}
 700
 701static void search_free(struct closure *cl)
 702{
 703        struct search *s = container_of(cl, struct search, cl);
 704
 705        atomic_dec(&s->iop.c->search_inflight);
 706
 707        if (s->iop.bio)
 708                bio_put(s->iop.bio);
 709
 710        bio_complete(s);
 711        closure_debug_destroy(cl);
 712        mempool_free(s, &s->iop.c->search);
 713}
 714
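/* Allocate and initialize a struct search for an incoming request. */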
 715static inline struct search *search_alloc(struct bio *bio,
 716                                          struct bcache_device *d)
 717{
 718        struct search *s;
 719
 720        s = mempool_alloc(&d->c->search, GFP_NOIO);
 721
 722        closure_init(&s->cl, NULL);
 723        do_bio_hook(s, bio, request_endio);
 724        atomic_inc(&d->c->search_inflight);
 725
 726        s->orig_bio             = bio;
 727        s->cache_miss           = NULL;
 728        s->cache_missed         = 0;
 729        s->d                    = d;
 730        s->recoverable          = 1;
 731        s->write                = op_is_write(bio_op(bio));
 732        s->read_dirty_data      = 0;
 733        s->start_time           = jiffies;
 734
 735        s->iop.c                = d->c;
 736        s->iop.bio              = NULL;
 737        s->iop.inode            = d->id;
 738        s->iop.write_point      = hash_long((unsigned long) current, 16);
 739        s->iop.write_prio       = 0;
 740        s->iop.status           = 0;
 741        s->iop.flags            = 0;
 742        s->iop.flush_journal    = op_is_flush(bio->bi_opf);
 743        s->iop.wq               = bcache_wq;
 744
 745        return s;
 746}
 747
 748/* Cached devices */
 749
 750static void cached_dev_bio_complete(struct closure *cl)
 751{
 752        struct search *s = container_of(cl, struct search, cl);
 753        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 754
 755        cached_dev_put(dc);
 756        search_free(cl);
 757}
 758
 759/* Process reads */
 760
 761static void cached_dev_read_error_done(struct closure *cl)
 762{
 763        struct search *s = container_of(cl, struct search, cl);
 764
 765        if (s->iop.replace_collision)
 766                bch_mark_cache_miss_collision(s->iop.c, s->d);
 767
 768        if (s->iop.bio)
 769                bio_free_pages(s->iop.bio);
 770
 771        cached_dev_bio_complete(cl);
 772}
 773
 774static void cached_dev_read_error(struct closure *cl)
 775{
 776        struct search *s = container_of(cl, struct search, cl);
 777        struct bio *bio = &s->bio.bio;
 778
 779        /*
  780         * If the read request hit dirty data (s->read_dirty_data is true),
  781         * then recovering a failed read by rereading from the backing device
  782         * may return stale data. So read failure recovery is only permitted
  783         * when the read request hit clean data in the cache device, or when
  784         * a cache read race happened.
 785         */
 786        if (s->recoverable && !s->read_dirty_data) {
 787                /* Retry from the backing device: */
 788                trace_bcache_read_retry(s->orig_bio);
 789
 790                s->iop.status = 0;
 791                do_bio_hook(s, s->orig_bio, backing_request_endio);
 792
 793                /* XXX: invalidate cache */
 794
 795                /* I/O request sent to backing device */
 796                closure_bio_submit(s->iop.c, bio, cl);
 797        }
 798
 799        continue_at(cl, cached_dev_read_error_done, NULL);
 800}
 801
 802static void cached_dev_cache_miss_done(struct closure *cl)
 803{
 804        struct search *s = container_of(cl, struct search, cl);
 805        struct bcache_device *d = s->d;
 806
 807        if (s->iop.replace_collision)
 808                bch_mark_cache_miss_collision(s->iop.c, s->d);
 809
 810        if (s->iop.bio)
 811                bio_free_pages(s->iop.bio);
 812
 813        cached_dev_bio_complete(cl);
 814        closure_put(&d->cl);
 815}
 816
 817static void cached_dev_read_done(struct closure *cl)
 818{
 819        struct search *s = container_of(cl, struct search, cl);
 820        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 821
 822        /*
 823         * We had a cache miss; cache_bio now contains data ready to be inserted
 824         * into the cache.
 825         *
 826         * First, we copy the data we just read from cache_bio's bounce buffers
 827         * to the buffers the original bio pointed to:
 828         */
 829
 830        if (s->iop.bio) {
 831                bio_reset(s->iop.bio);
 832                s->iop.bio->bi_iter.bi_sector =
 833                        s->cache_miss->bi_iter.bi_sector;
 834                bio_copy_dev(s->iop.bio, s->cache_miss);
 835                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 836                bch_bio_map(s->iop.bio, NULL);
 837
 838                bio_copy_data(s->cache_miss, s->iop.bio);
 839
 840                bio_put(s->cache_miss);
 841                s->cache_miss = NULL;
 842        }
 843
 844        if (verify(dc) && s->recoverable && !s->read_dirty_data)
 845                bch_data_verify(dc, s->orig_bio);
 846
 847        closure_get(&dc->disk.cl);
 848        bio_complete(s);
 849
 850        if (s->iop.bio &&
 851            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
 852                BUG_ON(!s->iop.replace);
 853                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 854        }
 855
 856        continue_at(cl, cached_dev_cache_miss_done, NULL);
 857}
 858
 859static void cached_dev_read_done_bh(struct closure *cl)
 860{
 861        struct search *s = container_of(cl, struct search, cl);
 862        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 863
 864        bch_mark_cache_accounting(s->iop.c, s->d,
 865                                  !s->cache_missed, s->iop.bypass);
 866        trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
 867
 868        if (s->iop.status)
 869                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 870        else if (s->iop.bio || verify(dc))
 871                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 872        else
 873                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 874}
 875
 876static int cached_dev_cache_miss(struct btree *b, struct search *s,
 877                                 struct bio *bio, unsigned int sectors)
 878{
 879        int ret = MAP_CONTINUE;
 880        unsigned int reada = 0;
 881        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 882        struct bio *miss, *cache_bio;
 883
 884        s->cache_missed = 1;
 885
 886        if (s->cache_miss || s->iop.bypass) {
 887                miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 888                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 889                goto out_submit;
 890        }
 891
 892        if (!(bio->bi_opf & REQ_RAHEAD) &&
 893            !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 894            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 895                reada = min_t(sector_t, dc->readahead >> 9,
 896                              get_capacity(bio->bi_disk) - bio_end_sector(bio));
 897
 898        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 899
 900        s->iop.replace_key = KEY(s->iop.inode,
 901                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 902                                 s->insert_bio_sectors);
 903
 904        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
 905        if (ret)
 906                return ret;
 907
 908        s->iop.replace = true;
 909
 910        miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 911
 912        /* btree_search_recurse()'s btree iterator is no good anymore */
 913        ret = miss == bio ? MAP_DONE : -EINTR;
 914
 915        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 916                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
 917                        &dc->disk.bio_split);
 918        if (!cache_bio)
 919                goto out_submit;
 920
 921        cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
 922        bio_copy_dev(cache_bio, miss);
 923        cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 924
 925        cache_bio->bi_end_io    = backing_request_endio;
 926        cache_bio->bi_private   = &s->cl;
 927
 928        bch_bio_map(cache_bio, NULL);
 929        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 930                goto out_put;
 931
 932        if (reada)
 933                bch_mark_cache_readahead(s->iop.c, s->d);
 934
 935        s->cache_miss   = miss;
 936        s->iop.bio      = cache_bio;
 937        bio_get(cache_bio);
 938        /* I/O request sent to backing device */
 939        closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 940
 941        return ret;
 942out_put:
 943        bio_put(cache_bio);
 944out_submit:
 945        miss->bi_end_io         = backing_request_endio;
 946        miss->bi_private        = &s->cl;
 947        /* I/O request sent to backing device */
 948        closure_bio_submit(s->iop.c, miss, &s->cl);
 949        return ret;
 950}
 951
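/* Handle a read: look the request up in the btree, then finish in cached_dev_read_done_bh(). */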
 952static void cached_dev_read(struct cached_dev *dc, struct search *s)
 953{
 954        struct closure *cl = &s->cl;
 955
 956        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 957        continue_at(cl, cached_dev_read_done_bh, NULL);
 958}
 959
 960/* Process writes */
 961
 962static void cached_dev_write_complete(struct closure *cl)
 963{
 964        struct search *s = container_of(cl, struct search, cl);
 965        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 966
 967        up_read_non_owner(&dc->writeback_lock);
 968        cached_dev_bio_complete(cl);
 969}
 970
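/*
 * Handle a write to a cached device: depending on overlap with in-flight
 * writeback, should_writeback() and discards, the data is written to the
 * cache only (writeback), to both the cache and the backing device
 * (writethrough), or to the backing device with the cached range invalidated
 * (bypass).
 */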
 971static void cached_dev_write(struct cached_dev *dc, struct search *s)
 972{
 973        struct closure *cl = &s->cl;
 974        struct bio *bio = &s->bio.bio;
 975        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 976        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 977
 978        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 979
 980        down_read_non_owner(&dc->writeback_lock);
 981        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
 982                /*
 983                 * We overlap with some dirty data undergoing background
 984                 * writeback, force this write to writeback
 985                 */
 986                s->iop.bypass = false;
 987                s->iop.writeback = true;
 988        }
 989
 990        /*
 991         * Discards aren't _required_ to do anything, so skipping if
 992         * check_overlapping returned true is ok
 993         *
 994         * But check_overlapping drops dirty keys for which io hasn't started,
 995         * so we still want to call it.
 996         */
 997        if (bio_op(bio) == REQ_OP_DISCARD)
 998                s->iop.bypass = true;
 999
1000        if (should_writeback(dc, s->orig_bio,
1001                             cache_mode(dc),
1002                             s->iop.bypass)) {
1003                s->iop.bypass = false;
1004                s->iop.writeback = true;
1005        }
1006
1007        if (s->iop.bypass) {
1008                s->iop.bio = s->orig_bio;
1009                bio_get(s->iop.bio);
1010
1011                if (bio_op(bio) == REQ_OP_DISCARD &&
1012                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1013                        goto insert_data;
1014
1015                /* I/O request sent to backing device */
1016                bio->bi_end_io = backing_request_endio;
1017                closure_bio_submit(s->iop.c, bio, cl);
1018
1019        } else if (s->iop.writeback) {
1020                bch_writeback_add(dc);
1021                s->iop.bio = bio;
1022
1023                if (bio->bi_opf & REQ_PREFLUSH) {
1024                        /*
1025                         * Also need to send a flush to the backing
1026                         * device.
1027                         */
1028                        struct bio *flush;
1029
1030                        flush = bio_alloc_bioset(GFP_NOIO, 0,
1031                                                 &dc->disk.bio_split);
1032                        if (!flush) {
1033                                s->iop.status = BLK_STS_RESOURCE;
1034                                goto insert_data;
1035                        }
1036                        bio_copy_dev(flush, bio);
1037                        flush->bi_end_io = backing_request_endio;
1038                        flush->bi_private = cl;
1039                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1040                        /* I/O request sent to backing device */
1041                        closure_bio_submit(s->iop.c, flush, cl);
1042                }
1043        } else {
1044                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1045                /* I/O request sent to backing device */
1046                bio->bi_end_io = backing_request_endio;
1047                closure_bio_submit(s->iop.c, bio, cl);
1048        }
1049
1050insert_data:
1051        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1052        continue_at(cl, cached_dev_write_complete, NULL);
1053}
1054
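/*
 * Handle a zero-size bio: journal a flush if requested and forward the bio to
 * the backing device.
 */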
1055static void cached_dev_nodata(struct closure *cl)
1056{
1057        struct search *s = container_of(cl, struct search, cl);
1058        struct bio *bio = &s->bio.bio;
1059
1060        if (s->iop.flush_journal)
1061                bch_journal_meta(s->iop.c, cl);
1062
1063        /* If it's a flush, we send the flush to the backing device too */
1064        bio->bi_end_io = backing_request_endio;
1065        closure_bio_submit(s->iop.c, bio, cl);
1066
1067        continue_at(cl, cached_dev_bio_complete, NULL);
1068}
1069
1070struct detached_dev_io_private {
1071        struct bcache_device    *d;
1072        unsigned long           start_time;
1073        bio_end_io_t            *bi_end_io;
1074        void                    *bi_private;
1075};
1076
1077static void detached_dev_end_io(struct bio *bio)
1078{
1079        struct detached_dev_io_private *ddip;
1080
1081        ddip = bio->bi_private;
1082        bio->bi_end_io = ddip->bi_end_io;
1083        bio->bi_private = ddip->bi_private;
1084
1085        generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1086                            &ddip->d->disk->part0, ddip->start_time);
1087
1088        if (bio->bi_status) {
1089                struct cached_dev *dc = container_of(ddip->d,
1090                                                     struct cached_dev, disk);
1091                /* should count I/O error for backing device here */
1092                bch_count_backing_io_errors(dc, bio);
1093        }
1094
1095        kfree(ddip);
1096        bio->bi_end_io(bio);
1097}
1098
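/*
 * Pass a request straight through to the backing device, bypassing the cache
 * entirely, while still doing I/O accounting and backing device error
 * counting.
 */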
1099static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1100{
1101        struct detached_dev_io_private *ddip;
1102        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1103
1104        /*
 1105         * No need to call closure_get(&dc->disk.cl): the upper layer has
 1106         * already opened the bcache device, which took a reference via
 1107         * closure_get(&dc->disk.cl).
1108         */
1109        ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
1110        ddip->d = d;
1111        ddip->start_time = jiffies;
1112        ddip->bi_end_io = bio->bi_end_io;
1113        ddip->bi_private = bio->bi_private;
1114        bio->bi_end_io = detached_dev_end_io;
1115        bio->bi_private = ddip;
1116
1117        if ((bio_op(bio) == REQ_OP_DISCARD) &&
1118            !blk_queue_discard(bdev_get_queue(dc->bdev)))
1119                bio->bi_end_io(bio);
1120        else
1121                generic_make_request(bio);
1122}
1123
1124static void quit_max_writeback_rate(struct cache_set *c,
1125                                    struct cached_dev *this_dc)
1126{
1127        int i;
1128        struct bcache_device *d;
1129        struct cached_dev *dc;
1130
1131        /*
 1132         * The mutex bch_register_lock may be contended by other parallel
 1133         * requesters, or by attach/detach operations on another backing
 1134         * device. Waiting for the mutex may increase I/O request latency
 1135         * for seconds or more. To avoid such a situation, if mutex_trylock()
 1136         * fails, only the writeback rate of the current cached device is set
 1137         * to 1, and __update_writeback_rate() will decide the writeback rate
 1138         * of the other cached devices (remember c->idle_counter is 0 already).
1139         */
1140        if (mutex_trylock(&bch_register_lock)) {
1141                for (i = 0; i < c->devices_max_used; i++) {
1142                        if (!c->devices[i])
1143                                continue;
1144
1145                        if (UUID_FLASH_ONLY(&c->uuids[i]))
1146                                continue;
1147
1148                        d = c->devices[i];
1149                        dc = container_of(d, struct cached_dev, disk);
1150                        /*
1151                         * set writeback rate to default minimum value,
 1152                         * then let update_writeback_rate() decide the
1153                         * upcoming rate.
1154                         */
1155                        atomic_long_set(&dc->writeback_rate.rate, 1);
1156                }
1157                mutex_unlock(&bch_register_lock);
1158        } else
1159                atomic_long_set(&this_dc->writeback_rate.rate, 1);
1160}
1161
1162/* Cached devices - read & write stuff */
1163
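/*
 * make_request entry point for cached devices: account the I/O, remap the bio
 * onto the backing device, then hand it to the read/write/nodata paths, or
 * send it straight to the backing device if the cache isn't available.
 */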
1164static blk_qc_t cached_dev_make_request(struct request_queue *q,
1165                                        struct bio *bio)
1166{
1167        struct search *s;
1168        struct bcache_device *d = bio->bi_disk->private_data;
1169        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1170        int rw = bio_data_dir(bio);
1171
1172        if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1173                     dc->io_disable)) {
1174                bio->bi_status = BLK_STS_IOERR;
1175                bio_endio(bio);
1176                return BLK_QC_T_NONE;
1177        }
1178
1179        if (likely(d->c)) {
1180                if (atomic_read(&d->c->idle_counter))
1181                        atomic_set(&d->c->idle_counter, 0);
1182                /*
 1183                 * If at_max_writeback_rate of the cache set is true and new
 1184                 * I/O comes in, quit the max writeback rate of all cached devices
1185                 * attached to this cache set, and set at_max_writeback_rate
1186                 * to false.
1187                 */
1188                if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1189                        atomic_set(&d->c->at_max_writeback_rate, 0);
1190                        quit_max_writeback_rate(d->c, dc);
1191                }
1192        }
1193
1194        generic_start_io_acct(q,
1195                              bio_op(bio),
1196                              bio_sectors(bio),
1197                              &d->disk->part0);
1198
1199        bio_set_dev(bio, dc->bdev);
1200        bio->bi_iter.bi_sector += dc->sb.data_offset;
1201
1202        if (cached_dev_get(dc)) {
1203                s = search_alloc(bio, d);
1204                trace_bcache_request_start(s->d, bio);
1205
1206                if (!bio->bi_iter.bi_size) {
1207                        /*
1208                         * can't call bch_journal_meta from under
1209                         * generic_make_request
1210                         */
1211                        continue_at_nobarrier(&s->cl,
1212                                              cached_dev_nodata,
1213                                              bcache_wq);
1214                } else {
1215                        s->iop.bypass = check_should_bypass(dc, bio);
1216
1217                        if (rw)
1218                                cached_dev_write(dc, s);
1219                        else
1220                                cached_dev_read(dc, s);
1221                }
1222        } else
1223                /* I/O request sent to backing device */
1224                detached_dev_do_request(d, bio);
1225
1226        return BLK_QC_T_NONE;
1227}
1228
1229static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1230                            unsigned int cmd, unsigned long arg)
1231{
1232        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1233
1234        if (dc->io_disable)
1235                return -EIO;
1236
1237        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1238}
1239
1240static int cached_dev_congested(void *data, int bits)
1241{
1242        struct bcache_device *d = data;
1243        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1244        struct request_queue *q = bdev_get_queue(dc->bdev);
1245        int ret = 0;
1246
1247        if (bdi_congested(q->backing_dev_info, bits))
1248                return 1;
1249
1250        if (cached_dev_get(dc)) {
1251                unsigned int i;
1252                struct cache *ca;
1253
1254                for_each_cache(ca, d->c, i) {
1255                        q = bdev_get_queue(ca->bdev);
1256                        ret |= bdi_congested(q->backing_dev_info, bits);
1257                }
1258
1259                cached_dev_put(dc);
1260        }
1261
1262        return ret;
1263}
1264
1265void bch_cached_dev_request_init(struct cached_dev *dc)
1266{
1267        struct gendisk *g = dc->disk.disk;
1268
1269        g->queue->make_request_fn               = cached_dev_make_request;
1270        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1271        dc->disk.cache_miss                     = cached_dev_cache_miss;
1272        dc->disk.ioctl                          = cached_dev_ioctl;
1273}
1274
1275/* Flash backed devices */
1276
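/*
 * For a flash-only volume a "cache miss" is just a hole in the keyspace:
 * zero-fill the unmapped part of the bio and advance past it.
 */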
1277static int flash_dev_cache_miss(struct btree *b, struct search *s,
1278                                struct bio *bio, unsigned int sectors)
1279{
1280        unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1281
1282        swap(bio->bi_iter.bi_size, bytes);
1283        zero_fill_bio(bio);
1284        swap(bio->bi_iter.bi_size, bytes);
1285
1286        bio_advance(bio, bytes);
1287
1288        if (!bio->bi_iter.bi_size)
1289                return MAP_DONE;
1290
1291        return MAP_CONTINUE;
1292}
1293
1294static void flash_dev_nodata(struct closure *cl)
1295{
1296        struct search *s = container_of(cl, struct search, cl);
1297
1298        if (s->iop.flush_journal)
1299                bch_journal_meta(s->iop.c, cl);
1300
1301        continue_at(cl, search_free, NULL);
1302}
1303
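/*
 * make_request entry point for flash-only volumes: writes go through
 * bch_data_insert() (with bypass set for discards, which just invalidates),
 * reads go through cache_lookup().
 */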
1304static blk_qc_t flash_dev_make_request(struct request_queue *q,
1305                                             struct bio *bio)
1306{
1307        struct search *s;
1308        struct closure *cl;
1309        struct bcache_device *d = bio->bi_disk->private_data;
1310
1311        if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1312                bio->bi_status = BLK_STS_IOERR;
1313                bio_endio(bio);
1314                return BLK_QC_T_NONE;
1315        }
1316
1317        generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1318
1319        s = search_alloc(bio, d);
1320        cl = &s->cl;
1321        bio = &s->bio.bio;
1322
1323        trace_bcache_request_start(s->d, bio);
1324
1325        if (!bio->bi_iter.bi_size) {
1326                /*
1327                 * can't call bch_journal_meta from under
1328                 * generic_make_request
1329                 */
1330                continue_at_nobarrier(&s->cl,
1331                                      flash_dev_nodata,
1332                                      bcache_wq);
1333                return BLK_QC_T_NONE;
1334        } else if (bio_data_dir(bio)) {
1335                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1336                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
1337                                        &KEY(d->id, bio_end_sector(bio), 0));
1338
1339                s->iop.bypass           = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1340                s->iop.writeback        = true;
1341                s->iop.bio              = bio;
1342
1343                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1344        } else {
1345                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1346        }
1347
1348        continue_at(cl, search_free, NULL);
1349        return BLK_QC_T_NONE;
1350}
1351
1352static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1353                           unsigned int cmd, unsigned long arg)
1354{
1355        return -ENOTTY;
1356}
1357
1358static int flash_dev_congested(void *data, int bits)
1359{
1360        struct bcache_device *d = data;
1361        struct request_queue *q;
1362        struct cache *ca;
1363        unsigned int i;
1364        int ret = 0;
1365
1366        for_each_cache(ca, d->c, i) {
1367                q = bdev_get_queue(ca->bdev);
1368                ret |= bdi_congested(q->backing_dev_info, bits);
1369        }
1370
1371        return ret;
1372}
1373
1374void bch_flash_dev_request_init(struct bcache_device *d)
1375{
1376        struct gendisk *g = d->disk;
1377
1378        g->queue->make_request_fn               = flash_dev_make_request;
1379        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1380        d->cache_miss                           = flash_dev_cache_miss;
1381        d->ioctl                                = flash_dev_ioctl;
1382}
1383
1384void bch_request_exit(void)
1385{
1386        kmem_cache_destroy(bch_search_cache);
1387}
1388
1389int __init bch_request_init(void)
1390{
1391        bch_search_cache = KMEM_CACHE(search, 0);
1392        if (!bch_search_cache)
1393                return -ENOMEM;
1394
1395        return 0;
1396}
1397