linux/drivers/md/bcache/request.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Main bcache entry point - handle a read or a write request and decide what to
   4 * do with it; the make_request functions are called by the block layer.
   5 *
   6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
   7 * Copyright 2012 Google, Inc.
   8 */
   9
  10#include "bcache.h"
  11#include "btree.h"
  12#include "debug.h"
  13#include "request.h"
  14#include "writeback.h"
  15
  16#include <linux/module.h>
  17#include <linux/hash.h>
  18#include <linux/random.h>
  19#include <linux/backing-dev.h>
  20
  21#include <trace/events/bcache.h>
  22
  23#define CUTOFF_CACHE_ADD        95
  24#define CUTOFF_CACHE_READA      90
  25
  26struct kmem_cache *bch_search_cache;
  27
  28static void bch_data_insert_start(struct closure *cl);
  29
  30static unsigned int cache_mode(struct cached_dev *dc)
  31{
  32        return BDEV_CACHE_MODE(&dc->sb);
  33}
  34
  35static bool verify(struct cached_dev *dc)
  36{
  37        return dc->verify;
  38}
  39
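/*
 * Compute a checksum over the data the bio points at and stash it in the
 * key that will describe that data: the crc64 (truncated to 63 bits) is
 * stored in the u64 slot immediately after the key's last data pointer.
 */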
  40static void bio_csum(struct bio *bio, struct bkey *k)
  41{
  42        struct bio_vec bv;
  43        struct bvec_iter iter;
  44        uint64_t csum = 0;
  45
  46        bio_for_each_segment(bv, bio, iter) {
  47                void *d = kmap(bv.bv_page) + bv.bv_offset;
  48
  49                csum = bch_crc64_update(csum, d, bv.bv_len);
  50                kunmap(bv.bv_page);
  51        }
  52
  53        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
  54}
  55
  56/* Insert data into cache */
  57
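/*
 * Second half of the insert path: journal the keys accumulated so far
 * (replace inserts for cache misses skip the journal), insert them into
 * the btree, then either loop back into bch_data_insert_start() for the
 * rest of the data or finish up.  -ESRCH from the btree insert means a
 * replace key no longer matched (a cache miss collision); any other
 * error abandons the rest of the insert with BLK_STS_RESOURCE.
 */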
  58static void bch_data_insert_keys(struct closure *cl)
  59{
  60        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
  61        atomic_t *journal_ref = NULL;
  62        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
  63        int ret;
  64
  65        /*
  66         * If we're looping, might already be waiting on
  67         * another journal write - can't wait on more than one journal write at
  68         * a time
  69         *
  70         * XXX: this looks wrong
  71         */
  72#if 0
  73        while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
  74                closure_sync(&s->cl);
  75#endif
  76
  77        if (!op->replace)
  78                journal_ref = bch_journal(op->c, &op->insert_keys,
  79                                          op->flush_journal ? cl : NULL);
  80
  81        ret = bch_btree_insert(op->c, &op->insert_keys,
  82                               journal_ref, replace_key);
  83        if (ret == -ESRCH) {
  84                op->replace_collision = true;
  85        } else if (ret) {
  86                op->status              = BLK_STS_RESOURCE;
  87                op->insert_data_done    = true;
  88        }
  89
  90        if (journal_ref)
  91                atomic_dec_bug(journal_ref);
  92
  93        if (!op->insert_data_done) {
  94                continue_at(cl, bch_data_insert_start, op->wq);
  95                return;
  96        }
  97
  98        bch_keylist_free(&op->insert_keys);
  99        closure_return(cl);
 100}
 101
 102static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
 103                               struct cache_set *c)
 104{
 105        size_t oldsize = bch_keylist_nkeys(l);
 106        size_t newsize = oldsize + u64s;
 107
        /*
         * The journalling code doesn't handle the case where the keys to
         * insert are bigger than an empty write: if we just return -ENOMEM
         * here, bch_data_insert_keys() will insert the keys created so far
         * and finish the rest when the keylist is empty.
         */
 114        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
 115                return -ENOMEM;
 116
 117        return __bch_keylist_realloc(l, u64s);
 118}
 119
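/*
 * Bypass path: instead of writing data into the cache, emit keys with no
 * pointers covering the bio's range, so that whatever the cache holds for
 * that range is invalidated when the keys are inserted.  The range is
 * chopped into chunks small enough to fit in a key's size field.
 */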
 120static void bch_data_invalidate(struct closure *cl)
 121{
 122        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 123        struct bio *bio = op->bio;
 124
 125        pr_debug("invalidating %i sectors from %llu",
 126                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 127
 128        while (bio_sectors(bio)) {
 129                unsigned int sectors = min(bio_sectors(bio),
 130                                       1U << (KEY_SIZE_BITS - 1));
 131
 132                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
 133                        goto out;
 134
 135                bio->bi_iter.bi_sector  += sectors;
 136                bio->bi_iter.bi_size    -= sectors << 9;
 137
 138                bch_keylist_add(&op->insert_keys,
 139                                &KEY(op->inode,
 140                                     bio->bi_iter.bi_sector,
 141                                     sectors));
 142        }
 143
 144        op->insert_data_done = true;
        /* release the ref taken by bio_get() in bch_data_insert() */
 146        bio_put(bio);
 147out:
 148        continue_at(cl, bch_data_insert_keys, op->wq);
 149}
 150
 151static void bch_data_insert_error(struct closure *cl)
 152{
 153        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 154
 155        /*
 156         * Our data write just errored, which means we've got a bunch of keys to
 157         * insert that point to data that wasn't successfully written.
 158         *
 159         * We don't have to insert those keys but we still have to invalidate
 160         * that region of the cache - so, if we just strip off all the pointers
 161         * from the keys we'll accomplish just that.
 162         */
 163
 164        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
 165
 166        while (src != op->insert_keys.top) {
 167                struct bkey *n = bkey_next(src);
 168
 169                SET_KEY_PTRS(src, 0);
 170                memmove(dst, src, bkey_bytes(src));
 171
 172                dst = bkey_next(dst);
 173                src = n;
 174        }
 175
 176        op->insert_keys.top = dst;
 177
 178        bch_data_insert_keys(cl);
 179}
 180
 181static void bch_data_insert_endio(struct bio *bio)
 182{
 183        struct closure *cl = bio->bi_private;
 184        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 185
 186        if (bio->bi_status) {
 187                /* TODO: We could try to recover from this. */
 188                if (op->writeback)
 189                        op->status = bio->bi_status;
 190                else if (!op->replace)
 191                        set_closure_fn(cl, bch_data_insert_error, op->wq);
 192                else
 193                        set_closure_fn(cl, NULL, NULL);
 194        }
 195
 196        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 197}
 198
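/*
 * First half of the insert path: allocate space in the cache, split the
 * bio to match what was allocated, build a key describing each fragment
 * and submit the data writes.  When the keylist fills up, or once all the
 * data has been submitted, control passes to bch_data_insert_keys().
 */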
 199static void bch_data_insert_start(struct closure *cl)
 200{
 201        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 202        struct bio *bio = op->bio, *n;
 203
 204        if (op->bypass)
 205                return bch_data_invalidate(cl);
 206
 207        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 208                wake_up_gc(op->c);
 209
 210        /*
 211         * Journal writes are marked REQ_PREFLUSH; if the original write was a
 212         * flush, it'll wait on the journal write.
 213         */
 214        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
 215
 216        do {
 217                unsigned int i;
 218                struct bkey *k;
 219                struct bio_set *split = &op->c->bio_split;
 220
                /* 2 u64s for the key itself, 1 for the device pointer, 1 for the checksum */
 222                if (bch_keylist_realloc(&op->insert_keys,
 223                                        3 + (op->csum ? 1 : 0),
 224                                        op->c)) {
 225                        continue_at(cl, bch_data_insert_keys, op->wq);
 226                        return;
 227                }
 228
 229                k = op->insert_keys.top;
 230                bkey_init(k);
 231                SET_KEY_INODE(k, op->inode);
 232                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 233
 234                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 235                                       op->write_point, op->write_prio,
 236                                       op->writeback))
 237                        goto err;
 238
 239                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 240
 241                n->bi_end_io    = bch_data_insert_endio;
 242                n->bi_private   = cl;
 243
 244                if (op->writeback) {
 245                        SET_KEY_DIRTY(k, true);
 246
 247                        for (i = 0; i < KEY_PTRS(k); i++)
 248                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
 249                                            GC_MARK_DIRTY);
 250                }
 251
 252                SET_KEY_CSUM(k, op->csum);
 253                if (KEY_CSUM(k))
 254                        bio_csum(n, k);
 255
 256                trace_bcache_cache_insert(k);
 257                bch_keylist_push(&op->insert_keys);
 258
 259                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
 260                bch_submit_bbio(n, op->c, k, 0);
 261        } while (n != bio);
 262
 263        op->insert_data_done = true;
 264        continue_at(cl, bch_data_insert_keys, op->wq);
 265        return;
 266err:
        /* bch_alloc_sectors() blocks if op->writeback is true */
 268        BUG_ON(op->writeback);
 269
        /*
         * But if it's not a writeback write we'd rather just bail out if
         * there aren't any buckets ready to write to - it might take a while
         * and we might be starving btree writes for gc or something.
         */
 275
 276        if (!op->replace) {
 277                /*
 278                 * Writethrough write: We can't complete the write until we've
 279                 * updated the index. But we don't want to delay the write while
 280                 * we wait for buckets to be freed up, so just invalidate the
 281                 * rest of the write.
 282                 */
 283                op->bypass = true;
 284                return bch_data_invalidate(cl);
 285        } else {
 286                /*
 287                 * From a cache miss, we can just insert the keys for the data
 288                 * we have written or bail out if we didn't do anything.
 289                 */
 290                op->insert_data_done = true;
 291                bio_put(bio);
 292
 293                if (!bch_keylist_empty(&op->insert_keys))
 294                        continue_at(cl, bch_data_insert_keys, op->wq);
 295                else
 296                        closure_return(cl);
 297        }
 298}
 299
 300/**
 301 * bch_data_insert - stick some data in the cache
 302 * @cl: closure pointer.
 303 *
 304 * This is the starting point for any data to end up in a cache device; it could
 305 * be from a normal write, or a writeback write, or a write to a flash only
 306 * volume - it's also used by the moving garbage collector to compact data in
 307 * mostly empty buckets.
 308 *
 309 * It first writes the data to the cache, creating a list of keys to be inserted
 310 * (if the data had to be fragmented there will be multiple keys); after the
 311 * data is written it calls bch_journal, and after the keys have been added to
 312 * the next journal write they're inserted into the btree.
 313 *
 314 * It inserts the data in op->bio; bi_sector is used for the key offset,
 315 * and op->inode is used for the key inode.
 316 *
 317 * If op->bypass is true, instead of inserting the data it invalidates the
 318 * region of the cache represented by op->bio and op->inode.
 319 */
 320void bch_data_insert(struct closure *cl)
 321{
 322        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 323
 324        trace_bcache_write(op->c, op->inode, op->bio,
 325                           op->writeback, op->bypass);
 326
 327        bch_keylist_init(&op->insert_keys);
 328        bio_get(op->bio);
 329        bch_data_insert_start(cl);
 330}
 331
 332/*
 333 * Congested?  Return 0 (not congested) or the limit (in sectors)
 334 * beyond which we should bypass the cache due to congestion.
 335 */
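/*
 * Roughly how the number below is produced (constants from bcache.h):
 * c->congested is driven negative elsewhere when cache-device I/Os take
 * longer than the congested_read/write_threshold_us settings, and the
 * time elapsed since the last congestion event (us / 1024, roughly ms)
 * decays it back toward zero.  While the sum is still negative we are
 * congested: adding CONGESTED_MAX gives a positive magnitude, which
 * fract_exp_two(i, 6) scales up (approximately 2^(i/64)), and a small
 * random dither (hweight32() of a random word, 16 on average) is
 * subtracted before returning at least 1.
 */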
 336unsigned int bch_get_congested(const struct cache_set *c)
 337{
 338        int i;
 339
 340        if (!c->congested_read_threshold_us &&
 341            !c->congested_write_threshold_us)
 342                return 0;
 343
 344        i = (local_clock_us() - c->congested_last_us) / 1024;
 345        if (i < 0)
 346                return 0;
 347
 348        i += atomic_read(&c->congested);
 349        if (i >= 0)
 350                return 0;
 351
 352        i += CONGESTED_MAX;
 353
 354        if (i > 0)
 355                i = fract_exp_two(i, 6);
 356
 357        i -= hweight32(get_random_u32());
 358
 359        return i > 0 ? i : 1;
 360}
 361
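/*
 * Fold the task's just-completed sequential I/O byte count into its
 * running average and reset the counter.  With a weight of 8 and no
 * fractional bits, ewma_add() here works out to roughly:
 *
 *      sequential_io_avg = (sequential_io_avg * 7 + sequential_io) / 8
 */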
 362static void add_sequential(struct task_struct *t)
 363{
 364        ewma_add(t->sequential_io_avg,
 365                 t->sequential_io, 8, 0);
 366
 367        t->sequential_io = 0;
 368}
 369
 370static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 371{
 372        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
 373}
 374
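/*
 * Decide whether a request should skip the cache entirely.  Requests are
 * bypassed when the device is detaching, the cache is nearly full (past
 * CUTOFF_CACHE_ADD), the cache mode rules them out, they are not aligned
 * to the cache's block size, they look like readahead or background I/O,
 * or - based on the per-task sequential I/O tracking below - they belong
 * to a large sequential stream or the cache is congested.
 */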
 375static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 376{
 377        struct cache_set *c = dc->disk.c;
 378        unsigned int mode = cache_mode(dc);
 379        unsigned int sectors, congested;
 380        struct task_struct *task = current;
 381        struct io *i;
 382
 383        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
 384            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
 385            (bio_op(bio) == REQ_OP_DISCARD))
 386                goto skip;
 387
 388        if (mode == CACHE_MODE_NONE ||
 389            (mode == CACHE_MODE_WRITEAROUND &&
 390             op_is_write(bio_op(bio))))
 391                goto skip;
 392
        /*
         * Flag for bypass if the IO is for read-ahead or background,
         * unless the read-ahead request is for metadata
         * (e.g., for gfs2 or xfs).
         */
 398        if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
 399            !(bio->bi_opf & (REQ_META|REQ_PRIO)))
 400                goto skip;
 401
 402        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 403            bio_sectors(bio) & (c->sb.block_size - 1)) {
 404                pr_debug("skipping unaligned io");
 405                goto skip;
 406        }
 407
 408        if (bypass_torture_test(dc)) {
 409                if ((get_random_int() & 3) == 3)
 410                        goto skip;
 411                else
 412                        goto rescale;
 413        }
 414
 415        congested = bch_get_congested(c);
 416        if (!congested && !dc->sequential_cutoff)
 417                goto rescale;
 418
 419        spin_lock(&dc->io_lock);
 420
 421        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
 422                if (i->last == bio->bi_iter.bi_sector &&
 423                    time_before(jiffies, i->jiffies))
 424                        goto found;
 425
 426        i = list_first_entry(&dc->io_lru, struct io, lru);
 427
 428        add_sequential(task);
 429        i->sequential = 0;
 430found:
 431        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
 432                i->sequential   += bio->bi_iter.bi_size;
 433
 434        i->last                  = bio_end_sector(bio);
 435        i->jiffies               = jiffies + msecs_to_jiffies(5000);
 436        task->sequential_io      = i->sequential;
 437
 438        hlist_del(&i->hash);
 439        hlist_add_head(&i->hash, iohash(dc, i->last));
 440        list_move_tail(&i->lru, &dc->io_lru);
 441
 442        spin_unlock(&dc->io_lock);
 443
 444        sectors = max(task->sequential_io,
 445                      task->sequential_io_avg) >> 9;
 446
 447        if (dc->sequential_cutoff &&
 448            sectors >= dc->sequential_cutoff >> 9) {
 449                trace_bcache_bypass_sequential(bio);
 450                goto skip;
 451        }
 452
 453        if (congested && sectors >= congested) {
 454                trace_bcache_bypass_congested(bio);
 455                goto skip;
 456        }
 457
 458rescale:
 459        bch_rescale_priorities(c, bio_sectors(bio));
 460        return false;
 461skip:
 462        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
 463        return true;
 464}
 465
 466/* Cache lookup */
 467
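/*
 * One struct search is allocated (from c->search) per request submitted
 * to a bcache device.  It bundles the original bio, the clone used for
 * cache and backing-device I/O (s->bio), the btree lookup state (s->op)
 * and the cache insert/invalidate state (s->iop); the closure in s->cl
 * ties the pieces together and its completion path ends in
 * bio_complete() and search_free().
 */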
 468struct search {
 469        /* Stack frame for bio_complete */
 470        struct closure          cl;
 471
 472        struct bbio             bio;
 473        struct bio              *orig_bio;
 474        struct bio              *cache_miss;
 475        struct bcache_device    *d;
 476
 477        unsigned int            insert_bio_sectors;
 478        unsigned int            recoverable:1;
 479        unsigned int            write:1;
 480        unsigned int            read_dirty_data:1;
 481        unsigned int            cache_missed:1;
 482
 483        unsigned long           start_time;
 484
 485        struct btree_op         op;
 486        struct data_insert_op   iop;
 487};
 488
 489static void bch_cache_read_endio(struct bio *bio)
 490{
 491        struct bbio *b = container_of(bio, struct bbio, bio);
 492        struct closure *cl = bio->bi_private;
 493        struct search *s = container_of(cl, struct search, cl);
 494
        /*
         * If the bucket was reused while our bio was in flight, we might have
         * read the wrong data. Set s->iop.status but don't report an error to
         * bch_bbio_endio(), so it doesn't get counted against the cache
         * device; we'll still reread the data from the backing device.
         */
 501
 502        if (bio->bi_status)
 503                s->iop.status = bio->bi_status;
 504        else if (!KEY_DIRTY(&b->key) &&
 505                 ptr_stale(s->iop.c, &b->key, 0)) {
 506                atomic_long_inc(&s->iop.c->cache_read_races);
 507                s->iop.status = BLK_STS_IOERR;
 508        }
 509
 510        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 511}
 512
 513/*
 514 * Read from a single key, handling the initial cache miss if the key starts in
 515 * the middle of the bio
 516 */
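/*
 * Called by bch_btree_map_keys() for each key overlapping the request:
 * returning MAP_CONTINUE keeps the walk going, while MAP_DONE means the
 * whole bio has been submitted.  The cache_miss() callback may instead
 * return -EINTR to restart the lookup from the top of the btree.
 */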
 517static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 518{
 519        struct search *s = container_of(op, struct search, op);
 520        struct bio *n, *bio = &s->bio.bio;
 521        struct bkey *bio_key;
 522        unsigned int ptr;
 523
 524        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 525                return MAP_CONTINUE;
 526
 527        if (KEY_INODE(k) != s->iop.inode ||
 528            KEY_START(k) > bio->bi_iter.bi_sector) {
 529                unsigned int bio_sectors = bio_sectors(bio);
 530                unsigned int sectors = KEY_INODE(k) == s->iop.inode
 531                        ? min_t(uint64_t, INT_MAX,
 532                                KEY_START(k) - bio->bi_iter.bi_sector)
 533                        : INT_MAX;
 534                int ret = s->d->cache_miss(b, s, bio, sectors);
 535
 536                if (ret != MAP_CONTINUE)
 537                        return ret;
 538
 539                /* if this was a complete miss we shouldn't get here */
 540                BUG_ON(bio_sectors <= sectors);
 541        }
 542
 543        if (!KEY_SIZE(k))
 544                return MAP_CONTINUE;
 545
 546        /* XXX: figure out best pointer - for multiple cache devices */
 547        ptr = 0;
 548
 549        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
 550
 551        if (KEY_DIRTY(k))
 552                s->read_dirty_data = true;
 553
 554        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
 555                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 556                           GFP_NOIO, &s->d->bio_split);
 557
 558        bio_key = &container_of(n, struct bbio, bio)->key;
 559        bch_bkey_copy_single_ptr(bio_key, k, ptr);
 560
 561        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 562        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 563
 564        n->bi_end_io    = bch_cache_read_endio;
 565        n->bi_private   = &s->cl;
 566
        /*
         * The bucket we're reading from might be reused while our bio
         * is in flight, and we could then end up reading the wrong
         * data.
         *
         * We guard against this by checking (in bch_cache_read_endio())
         * if the pointer is stale again; if so, we treat it as an error
         * and reread from the backing device (but we don't pass that
         * error up anywhere).
         */
 577
 578        __bch_submit_bbio(n, b->c);
 579        return n == bio ? MAP_DONE : MAP_CONTINUE;
 580}
 581
 582static void cache_lookup(struct closure *cl)
 583{
 584        struct search *s = container_of(cl, struct search, iop.cl);
 585        struct bio *bio = &s->bio.bio;
 586        struct cached_dev *dc;
 587        int ret;
 588
 589        bch_btree_op_init(&s->op, -1);
 590
 591        ret = bch_btree_map_keys(&s->op, s->iop.c,
 592                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 593                                 cache_lookup_fn, MAP_END_KEY);
 594        if (ret == -EAGAIN) {
 595                continue_at(cl, cache_lookup, bcache_wq);
 596                return;
 597        }
 598
        /*
         * We might hit an error while searching the btree; if that happens,
         * ret will be negative. In this scenario we should not recover data
         * from the backing device (when the cache device is dirty), because
         * we don't know whether all the bkeys the read request covered are
         * clean.
         *
         * When that happens, s->iop.status still has the initial value it
         * had before we submitted s->bio.bio.
         */
 608        if (ret < 0) {
 609                BUG_ON(ret == -EINTR);
 610                if (s->d && s->d->c &&
 611                                !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
 612                        dc = container_of(s->d, struct cached_dev, disk);
 613                        if (dc && atomic_read(&dc->has_dirty))
 614                                s->recoverable = false;
 615                }
 616                if (!s->iop.status)
 617                        s->iop.status = BLK_STS_IOERR;
 618        }
 619
 620        closure_return(cl);
 621}
 622
 623/* Common code for the make_request functions */
 624
 625static void request_endio(struct bio *bio)
 626{
 627        struct closure *cl = bio->bi_private;
 628
 629        if (bio->bi_status) {
 630                struct search *s = container_of(cl, struct search, cl);
 631
 632                s->iop.status = bio->bi_status;
 633                /* Only cache read errors are recoverable */
 634                s->recoverable = false;
 635        }
 636
 637        bio_put(bio);
 638        closure_put(cl);
 639}
 640
 641static void backing_request_endio(struct bio *bio)
 642{
 643        struct closure *cl = bio->bi_private;
 644
 645        if (bio->bi_status) {
 646                struct search *s = container_of(cl, struct search, cl);
 647                struct cached_dev *dc = container_of(s->d,
 648                                                     struct cached_dev, disk);
                /*
                 * If a bio has REQ_PREFLUSH in writeback mode, it was
                 * specially assembled in cached_dev_write() for a non-empty
                 * write request that carried REQ_PREFLUSH. We don't set
                 * s->iop.status for this failure; the status will be decided
                 * by the result of the bch_data_insert() operation.
                 */
 656                if (unlikely(s->iop.writeback &&
 657                             bio->bi_opf & REQ_PREFLUSH)) {
 658                        pr_err("Can't flush %s: returned bi_status %i",
 659                                dc->backing_dev_name, bio->bi_status);
 660                } else {
 661                        /* set to orig_bio->bi_status in bio_complete() */
 662                        s->iop.status = bio->bi_status;
 663                }
 664                s->recoverable = false;
 665                /* should count I/O error for backing device here */
 666                bch_count_backing_io_errors(dc, bio);
 667        }
 668
 669        bio_put(bio);
 670        closure_put(cl);
 671}
 672
 673static void bio_complete(struct search *s)
 674{
 675        if (s->orig_bio) {
 676                generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
 677                                    &s->d->disk->part0, s->start_time);
 678
 679                trace_bcache_request_end(s->d, s->orig_bio);
 680                s->orig_bio->bi_status = s->iop.status;
 681                bio_endio(s->orig_bio);
 682                s->orig_bio = NULL;
 683        }
 684}
 685
 686static void do_bio_hook(struct search *s,
 687                        struct bio *orig_bio,
 688                        bio_end_io_t *end_io_fn)
 689{
 690        struct bio *bio = &s->bio.bio;
 691
 692        bio_init(bio, NULL, 0);
 693        __bio_clone_fast(bio, orig_bio);
 694        /*
 695         * bi_end_io can be set separately somewhere else, e.g. the
 696         * variants in,
 697         * - cache_bio->bi_end_io from cached_dev_cache_miss()
 698         * - n->bi_end_io from cache_lookup_fn()
 699         */
 700        bio->bi_end_io          = end_io_fn;
 701        bio->bi_private         = &s->cl;
 702
 703        bio_cnt_set(bio, 3);
 704}
 705
 706static void search_free(struct closure *cl)
 707{
 708        struct search *s = container_of(cl, struct search, cl);
 709
 710        atomic_dec(&s->iop.c->search_inflight);
 711
 712        if (s->iop.bio)
 713                bio_put(s->iop.bio);
 714
 715        bio_complete(s);
 716        closure_debug_destroy(cl);
 717        mempool_free(s, &s->iop.c->search);
 718}
 719
 720static inline struct search *search_alloc(struct bio *bio,
 721                                          struct bcache_device *d)
 722{
 723        struct search *s;
 724
 725        s = mempool_alloc(&d->c->search, GFP_NOIO);
 726
 727        closure_init(&s->cl, NULL);
 728        do_bio_hook(s, bio, request_endio);
 729        atomic_inc(&d->c->search_inflight);
 730
 731        s->orig_bio             = bio;
 732        s->cache_miss           = NULL;
 733        s->cache_missed         = 0;
 734        s->d                    = d;
 735        s->recoverable          = 1;
 736        s->write                = op_is_write(bio_op(bio));
 737        s->read_dirty_data      = 0;
 738        s->start_time           = jiffies;
 739
 740        s->iop.c                = d->c;
 741        s->iop.bio              = NULL;
 742        s->iop.inode            = d->id;
 743        s->iop.write_point      = hash_long((unsigned long) current, 16);
 744        s->iop.write_prio       = 0;
 745        s->iop.status           = 0;
 746        s->iop.flags            = 0;
 747        s->iop.flush_journal    = op_is_flush(bio->bi_opf);
 748        s->iop.wq               = bcache_wq;
 749
 750        return s;
 751}
 752
 753/* Cached devices */
 754
 755static void cached_dev_bio_complete(struct closure *cl)
 756{
 757        struct search *s = container_of(cl, struct search, cl);
 758        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 759
 760        cached_dev_put(dc);
 761        search_free(cl);
 762}
 763
 764/* Process reads */
 765
 766static void cached_dev_read_error_done(struct closure *cl)
 767{
 768        struct search *s = container_of(cl, struct search, cl);
 769
 770        if (s->iop.replace_collision)
 771                bch_mark_cache_miss_collision(s->iop.c, s->d);
 772
 773        if (s->iop.bio)
 774                bio_free_pages(s->iop.bio);
 775
 776        cached_dev_bio_complete(cl);
 777}
 778
 779static void cached_dev_read_error(struct closure *cl)
 780{
 781        struct search *s = container_of(cl, struct search, cl);
 782        struct bio *bio = &s->bio.bio;
 783
        /*
         * If the read request hit dirty data (s->read_dirty_data is true),
         * then retrying the failed read from the backing device may return
         * stale data. So read failure recovery is only permitted when the
         * read request hit clean data in the cache, or when a cache read
         * race happened.
         */
 791        if (s->recoverable && !s->read_dirty_data) {
 792                /* Retry from the backing device: */
 793                trace_bcache_read_retry(s->orig_bio);
 794
 795                s->iop.status = 0;
 796                do_bio_hook(s, s->orig_bio, backing_request_endio);
 797
 798                /* XXX: invalidate cache */
 799
 800                /* I/O request sent to backing device */
 801                closure_bio_submit(s->iop.c, bio, cl);
 802        }
 803
 804        continue_at(cl, cached_dev_read_error_done, NULL);
 805}
 806
 807static void cached_dev_cache_miss_done(struct closure *cl)
 808{
 809        struct search *s = container_of(cl, struct search, cl);
 810        struct bcache_device *d = s->d;
 811
 812        if (s->iop.replace_collision)
 813                bch_mark_cache_miss_collision(s->iop.c, s->d);
 814
 815        if (s->iop.bio)
 816                bio_free_pages(s->iop.bio);
 817
 818        cached_dev_bio_complete(cl);
 819        closure_put(&d->cl);
 820}
 821
 822static void cached_dev_read_done(struct closure *cl)
 823{
 824        struct search *s = container_of(cl, struct search, cl);
 825        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 826
 827        /*
 828         * We had a cache miss; cache_bio now contains data ready to be inserted
 829         * into the cache.
 830         *
 831         * First, we copy the data we just read from cache_bio's bounce buffers
 832         * to the buffers the original bio pointed to:
 833         */
 834
 835        if (s->iop.bio) {
 836                bio_reset(s->iop.bio);
 837                s->iop.bio->bi_iter.bi_sector =
 838                        s->cache_miss->bi_iter.bi_sector;
 839                bio_copy_dev(s->iop.bio, s->cache_miss);
 840                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 841                bch_bio_map(s->iop.bio, NULL);
 842
 843                bio_copy_data(s->cache_miss, s->iop.bio);
 844
 845                bio_put(s->cache_miss);
 846                s->cache_miss = NULL;
 847        }
 848
 849        if (verify(dc) && s->recoverable && !s->read_dirty_data)
 850                bch_data_verify(dc, s->orig_bio);
 851
 852        closure_get(&dc->disk.cl);
 853        bio_complete(s);
 854
 855        if (s->iop.bio &&
 856            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
 857                BUG_ON(!s->iop.replace);
 858                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 859        }
 860
 861        continue_at(cl, cached_dev_cache_miss_done, NULL);
 862}
 863
 864static void cached_dev_read_done_bh(struct closure *cl)
 865{
 866        struct search *s = container_of(cl, struct search, cl);
 867        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 868
 869        bch_mark_cache_accounting(s->iop.c, s->d,
 870                                  !s->cache_missed, s->iop.bypass);
 871        trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
 872
 873        if (s->iop.status)
 874                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
 875        else if (s->iop.bio || verify(dc))
 876                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 877        else
 878                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
 879}
 880
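/*
 * Handle the part of a read the cache lookup could not satisfy.  Unless
 * the request is being bypassed, a replace key is reserved for the
 * missing range, the miss (plus optional readahead) is read from the
 * backing device into cache_bio, and cached_dev_read_done() later
 * inserts that data into the cache against the reserved replace key.
 */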
 881static int cached_dev_cache_miss(struct btree *b, struct search *s,
 882                                 struct bio *bio, unsigned int sectors)
 883{
 884        int ret = MAP_CONTINUE;
 885        unsigned int reada = 0;
 886        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 887        struct bio *miss, *cache_bio;
 888
 889        s->cache_missed = 1;
 890
 891        if (s->cache_miss || s->iop.bypass) {
 892                miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 893                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 894                goto out_submit;
 895        }
 896
 897        if (!(bio->bi_opf & REQ_RAHEAD) &&
 898            !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
 899            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
 900                reada = min_t(sector_t, dc->readahead >> 9,
 901                              get_capacity(bio->bi_disk) - bio_end_sector(bio));
 902
 903        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 904
 905        s->iop.replace_key = KEY(s->iop.inode,
 906                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 907                                 s->insert_bio_sectors);
 908
 909        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
 910        if (ret)
 911                return ret;
 912
 913        s->iop.replace = true;
 914
 915        miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
 916
 917        /* btree_search_recurse()'s btree iterator is no good anymore */
 918        ret = miss == bio ? MAP_DONE : -EINTR;
 919
 920        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 921                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
 922                        &dc->disk.bio_split);
 923        if (!cache_bio)
 924                goto out_submit;
 925
 926        cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
 927        bio_copy_dev(cache_bio, miss);
 928        cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;
 929
 930        cache_bio->bi_end_io    = backing_request_endio;
 931        cache_bio->bi_private   = &s->cl;
 932
 933        bch_bio_map(cache_bio, NULL);
 934        if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 935                goto out_put;
 936
 937        if (reada)
 938                bch_mark_cache_readahead(s->iop.c, s->d);
 939
 940        s->cache_miss   = miss;
 941        s->iop.bio      = cache_bio;
 942        bio_get(cache_bio);
 943        /* I/O request sent to backing device */
 944        closure_bio_submit(s->iop.c, cache_bio, &s->cl);
 945
 946        return ret;
 947out_put:
 948        bio_put(cache_bio);
 949out_submit:
 950        miss->bi_end_io         = backing_request_endio;
 951        miss->bi_private        = &s->cl;
 952        /* I/O request sent to backing device */
 953        closure_bio_submit(s->iop.c, miss, &s->cl);
 954        return ret;
 955}
 956
 957static void cached_dev_read(struct cached_dev *dc, struct search *s)
 958{
 959        struct closure *cl = &s->cl;
 960
 961        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
 962        continue_at(cl, cached_dev_read_done_bh, NULL);
 963}
 964
 965/* Process writes */
 966
 967static void cached_dev_write_complete(struct closure *cl)
 968{
 969        struct search *s = container_of(cl, struct search, cl);
 970        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 971
 972        up_read_non_owner(&dc->writeback_lock);
 973        cached_dev_bio_complete(cl);
 974}
 975
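/*
 * Writes take one of three paths: bypass (the write goes straight to the
 * backing device and the matching cache range is invalidated), writeback
 * (the data is written only to the cache and its keys are marked dirty
 * for later writeback), or writethrough (the bio is cloned, one copy is
 * sent to the backing device and the other is inserted into the cache).
 */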
 976static void cached_dev_write(struct cached_dev *dc, struct search *s)
 977{
 978        struct closure *cl = &s->cl;
 979        struct bio *bio = &s->bio.bio;
 980        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 981        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 982
 983        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
 984
 985        down_read_non_owner(&dc->writeback_lock);
 986        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
 987                /*
 988                 * We overlap with some dirty data undergoing background
 989                 * writeback, force this write to writeback
 990                 */
 991                s->iop.bypass = false;
 992                s->iop.writeback = true;
 993        }
 994
 995        /*
 996         * Discards aren't _required_ to do anything, so skipping if
 997         * check_overlapping returned true is ok
 998         *
 999         * But check_overlapping drops dirty keys for which io hasn't started,
1000         * so we still want to call it.
1001         */
1002        if (bio_op(bio) == REQ_OP_DISCARD)
1003                s->iop.bypass = true;
1004
1005        if (should_writeback(dc, s->orig_bio,
1006                             cache_mode(dc),
1007                             s->iop.bypass)) {
1008                s->iop.bypass = false;
1009                s->iop.writeback = true;
1010        }
1011
1012        if (s->iop.bypass) {
1013                s->iop.bio = s->orig_bio;
1014                bio_get(s->iop.bio);
1015
1016                if (bio_op(bio) == REQ_OP_DISCARD &&
1017                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
1018                        goto insert_data;
1019
1020                /* I/O request sent to backing device */
1021                bio->bi_end_io = backing_request_endio;
1022                closure_bio_submit(s->iop.c, bio, cl);
1023
1024        } else if (s->iop.writeback) {
1025                bch_writeback_add(dc);
1026                s->iop.bio = bio;
1027
1028                if (bio->bi_opf & REQ_PREFLUSH) {
1029                        /*
1030                         * Also need to send a flush to the backing
1031                         * device.
1032                         */
1033                        struct bio *flush;
1034
1035                        flush = bio_alloc_bioset(GFP_NOIO, 0,
1036                                                 &dc->disk.bio_split);
1037                        if (!flush) {
1038                                s->iop.status = BLK_STS_RESOURCE;
1039                                goto insert_data;
1040                        }
1041                        bio_copy_dev(flush, bio);
1042                        flush->bi_end_io = backing_request_endio;
1043                        flush->bi_private = cl;
1044                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1045                        /* I/O request sent to backing device */
1046                        closure_bio_submit(s->iop.c, flush, cl);
1047                }
1048        } else {
1049                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1050                /* I/O request sent to backing device */
1051                bio->bi_end_io = backing_request_endio;
1052                closure_bio_submit(s->iop.c, bio, cl);
1053        }
1054
1055insert_data:
1056        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1057        continue_at(cl, cached_dev_write_complete, NULL);
1058}
1059
1060static void cached_dev_nodata(struct closure *cl)
1061{
1062        struct search *s = container_of(cl, struct search, cl);
1063        struct bio *bio = &s->bio.bio;
1064
1065        if (s->iop.flush_journal)
1066                bch_journal_meta(s->iop.c, cl);
1067
1068        /* If it's a flush, we send the flush to the backing device too */
1069        bio->bi_end_io = backing_request_endio;
1070        closure_bio_submit(s->iop.c, bio, cl);
1071
1072        continue_at(cl, cached_dev_bio_complete, NULL);
1073}
1074
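/*
 * Bcache devices that are not attached to a cache set pass requests
 * straight through to the backing device.  struct detached_dev_io_private
 * saves the original bi_end_io/bi_private so that completion can still be
 * accounted against the bcache device before the bio is handed back to
 * its submitter.
 */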
1075struct detached_dev_io_private {
1076        struct bcache_device    *d;
1077        unsigned long           start_time;
1078        bio_end_io_t            *bi_end_io;
1079        void                    *bi_private;
1080};
1081
1082static void detached_dev_end_io(struct bio *bio)
1083{
1084        struct detached_dev_io_private *ddip;
1085
1086        ddip = bio->bi_private;
1087        bio->bi_end_io = ddip->bi_end_io;
1088        bio->bi_private = ddip->bi_private;
1089
1090        generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1091                            &ddip->d->disk->part0, ddip->start_time);
1092
1093        if (bio->bi_status) {
1094                struct cached_dev *dc = container_of(ddip->d,
1095                                                     struct cached_dev, disk);
1096                /* should count I/O error for backing device here */
1097                bch_count_backing_io_errors(dc, bio);
1098        }
1099
1100        kfree(ddip);
1101        bio->bi_end_io(bio);
1102}
1103
1104static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1105{
1106        struct detached_dev_io_private *ddip;
1107        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1108
        /*
         * No need to call closure_get(&dc->disk.cl) here, because the
         * upper layer has already opened the bcache device, which in turn
         * called closure_get(&dc->disk.cl).
         */
        ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
        if (!ddip) {
                /* GFP_NOIO allocation can still fail; fail the bio cleanly */
                bio->bi_status = BLK_STS_RESOURCE;
                bio->bi_end_io(bio);
                return;
        }
1115        ddip->d = d;
1116        ddip->start_time = jiffies;
1117        ddip->bi_end_io = bio->bi_end_io;
1118        ddip->bi_private = bio->bi_private;
1119        bio->bi_end_io = detached_dev_end_io;
1120        bio->bi_private = ddip;
1121
1122        if ((bio_op(bio) == REQ_OP_DISCARD) &&
1123            !blk_queue_discard(bdev_get_queue(dc->bdev)))
1124                bio->bi_end_io(bio);
1125        else
1126                generic_make_request(bio);
1127}
1128
1129static void quit_max_writeback_rate(struct cache_set *c,
1130                                    struct cached_dev *this_dc)
1131{
1132        int i;
1133        struct bcache_device *d;
1134        struct cached_dev *dc;
1135
        /*
         * The mutex bch_register_lock may be contended by other parallel
         * requesters, or by attach/detach operations on other backing
         * devices. Waiting for the mutex lock may increase I/O request
         * latency for seconds or more. To avoid such a situation, if
         * mutex_trylock() fails, only the writeback rate of the current
         * cached device is set to 1, and __update_writeback_rate() will
         * decide the writeback rate of the other cached devices (remember
         * that c->idle_counter is already 0).
         */
1145        if (mutex_trylock(&bch_register_lock)) {
1146                for (i = 0; i < c->devices_max_used; i++) {
1147                        if (!c->devices[i])
1148                                continue;
1149
1150                        if (UUID_FLASH_ONLY(&c->uuids[i]))
1151                                continue;
1152
1153                        d = c->devices[i];
1154                        dc = container_of(d, struct cached_dev, disk);
1155                        /*
1156                         * set writeback rate to default minimum value,
1157                         * then let update_writeback_rate() to decide the
1158                         * upcoming rate.
1159                         */
1160                        atomic_long_set(&dc->writeback_rate.rate, 1);
1161                }
1162                mutex_unlock(&bch_register_lock);
1163        } else
1164                atomic_long_set(&this_dc->writeback_rate.rate, 1);
1165}
1166
1167/* Cached devices - read & write stuff */
1168
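/*
 * make_request entry point for bcache devices with a backing device.
 * Requests either go through the full search machinery above (when the
 * device is attached to a cache set and cached_dev_get() succeeds) or are
 * passed straight through to the backing device via
 * detached_dev_do_request().
 */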
1169static blk_qc_t cached_dev_make_request(struct request_queue *q,
1170                                        struct bio *bio)
1171{
1172        struct search *s;
1173        struct bcache_device *d = bio->bi_disk->private_data;
1174        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1175        int rw = bio_data_dir(bio);
1176
1177        if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1178                     dc->io_disable)) {
1179                bio->bi_status = BLK_STS_IOERR;
1180                bio_endio(bio);
1181                return BLK_QC_T_NONE;
1182        }
1183
1184        if (likely(d->c)) {
1185                if (atomic_read(&d->c->idle_counter))
1186                        atomic_set(&d->c->idle_counter, 0);
                /*
                 * If the cache set's at_max_writeback_rate is true and new
                 * I/O arrives, quit the max writeback rate of all cached
                 * devices attached to this cache set, and set
                 * at_max_writeback_rate to false.
                 */
1193                if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1194                        atomic_set(&d->c->at_max_writeback_rate, 0);
1195                        quit_max_writeback_rate(d->c, dc);
1196                }
1197        }
1198
1199        generic_start_io_acct(q,
1200                              bio_op(bio),
1201                              bio_sectors(bio),
1202                              &d->disk->part0);
1203
1204        bio_set_dev(bio, dc->bdev);
1205        bio->bi_iter.bi_sector += dc->sb.data_offset;
1206
1207        if (cached_dev_get(dc)) {
1208                s = search_alloc(bio, d);
1209                trace_bcache_request_start(s->d, bio);
1210
1211                if (!bio->bi_iter.bi_size) {
1212                        /*
1213                         * can't call bch_journal_meta from under
1214                         * generic_make_request
1215                         */
1216                        continue_at_nobarrier(&s->cl,
1217                                              cached_dev_nodata,
1218                                              bcache_wq);
1219                } else {
1220                        s->iop.bypass = check_should_bypass(dc, bio);
1221
1222                        if (rw)
1223                                cached_dev_write(dc, s);
1224                        else
1225                                cached_dev_read(dc, s);
1226                }
1227        } else
1228                /* I/O request sent to backing device */
1229                detached_dev_do_request(d, bio);
1230
1231        return BLK_QC_T_NONE;
1232}
1233
1234static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1235                            unsigned int cmd, unsigned long arg)
1236{
1237        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1238
1239        if (dc->io_disable)
1240                return -EIO;
1241
1242        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1243}
1244
1245static int cached_dev_congested(void *data, int bits)
1246{
1247        struct bcache_device *d = data;
1248        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1249        struct request_queue *q = bdev_get_queue(dc->bdev);
1250        int ret = 0;
1251
1252        if (bdi_congested(q->backing_dev_info, bits))
1253                return 1;
1254
1255        if (cached_dev_get(dc)) {
1256                unsigned int i;
1257                struct cache *ca;
1258
1259                for_each_cache(ca, d->c, i) {
1260                        q = bdev_get_queue(ca->bdev);
1261                        ret |= bdi_congested(q->backing_dev_info, bits);
1262                }
1263
1264                cached_dev_put(dc);
1265        }
1266
1267        return ret;
1268}
1269
1270void bch_cached_dev_request_init(struct cached_dev *dc)
1271{
1272        struct gendisk *g = dc->disk.disk;
1273
1274        g->queue->make_request_fn               = cached_dev_make_request;
1275        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1276        dc->disk.cache_miss                     = cached_dev_cache_miss;
1277        dc->disk.ioctl                          = cached_dev_ioctl;
1278}
1279
1280/* Flash backed devices */
1281
1282static int flash_dev_cache_miss(struct btree *b, struct search *s,
1283                                struct bio *bio, unsigned int sectors)
1284{
1285        unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1286
1287        swap(bio->bi_iter.bi_size, bytes);
1288        zero_fill_bio(bio);
1289        swap(bio->bi_iter.bi_size, bytes);
1290
1291        bio_advance(bio, bytes);
1292
1293        if (!bio->bi_iter.bi_size)
1294                return MAP_DONE;
1295
1296        return MAP_CONTINUE;
1297}
1298
1299static void flash_dev_nodata(struct closure *cl)
1300{
1301        struct search *s = container_of(cl, struct search, cl);
1302
1303        if (s->iop.flush_journal)
1304                bch_journal_meta(s->iop.c, cl);
1305
1306        continue_at(cl, search_free, NULL);
1307}
1308
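/*
 * make_request entry point for flash-only volumes.  Writes are inserted
 * straight into the cache set as dirty data (there is no backing device
 * to fall back on), reads go through the same cache_lookup() path as
 * cached devices, and any holes are zero-filled by flash_dev_cache_miss().
 */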
1309static blk_qc_t flash_dev_make_request(struct request_queue *q,
1310                                             struct bio *bio)
1311{
1312        struct search *s;
1313        struct closure *cl;
1314        struct bcache_device *d = bio->bi_disk->private_data;
1315
1316        if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1317                bio->bi_status = BLK_STS_IOERR;
1318                bio_endio(bio);
1319                return BLK_QC_T_NONE;
1320        }
1321
1322        generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1323
1324        s = search_alloc(bio, d);
1325        cl = &s->cl;
1326        bio = &s->bio.bio;
1327
1328        trace_bcache_request_start(s->d, bio);
1329
1330        if (!bio->bi_iter.bi_size) {
1331                /*
1332                 * can't call bch_journal_meta from under
1333                 * generic_make_request
1334                 */
1335                continue_at_nobarrier(&s->cl,
1336                                      flash_dev_nodata,
1337                                      bcache_wq);
1338                return BLK_QC_T_NONE;
1339        } else if (bio_data_dir(bio)) {
1340                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1341                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
1342                                        &KEY(d->id, bio_end_sector(bio), 0));
1343
1344                s->iop.bypass           = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1345                s->iop.writeback        = true;
1346                s->iop.bio              = bio;
1347
1348                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1349        } else {
1350                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1351        }
1352
1353        continue_at(cl, search_free, NULL);
1354        return BLK_QC_T_NONE;
1355}
1356
1357static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1358                           unsigned int cmd, unsigned long arg)
1359{
1360        return -ENOTTY;
1361}
1362
1363static int flash_dev_congested(void *data, int bits)
1364{
1365        struct bcache_device *d = data;
1366        struct request_queue *q;
1367        struct cache *ca;
1368        unsigned int i;
1369        int ret = 0;
1370
1371        for_each_cache(ca, d->c, i) {
1372                q = bdev_get_queue(ca->bdev);
1373                ret |= bdi_congested(q->backing_dev_info, bits);
1374        }
1375
1376        return ret;
1377}
1378
1379void bch_flash_dev_request_init(struct bcache_device *d)
1380{
1381        struct gendisk *g = d->disk;
1382
1383        g->queue->make_request_fn               = flash_dev_make_request;
1384        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1385        d->cache_miss                           = flash_dev_cache_miss;
1386        d->ioctl                                = flash_dev_ioctl;
1387}
1388
1389void bch_request_exit(void)
1390{
1391        kmem_cache_destroy(bch_search_cache);
1392}
1393
1394int __init bch_request_init(void)
1395{
1396        bch_search_cache = KMEM_CACHE(search, 0);
1397        if (!bch_search_cache)
1398                return -ENOMEM;
1399
1400        return 0;
1401}
1402