linux/block/bio.c
   1/*
   2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public License
  14 * along with this program; if not, write to the Free Software
  15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  16 *
  17 */
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/bio.h>
  21#include <linux/blkdev.h>
  22#include <linux/uio.h>
  23#include <linux/iocontext.h>
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/kernel.h>
  27#include <linux/export.h>
  28#include <linux/mempool.h>
  29#include <linux/workqueue.h>
  30#include <linux/cgroup.h>
  31#include <linux/blk-cgroup.h>
  32
  33#include <trace/events/block.h>
  34#include "blk.h"
  35#include "blk-rq-qos.h"
  36
  37/*
  38 * Test patch to inline a certain number of bi_io_vec's inside the bio
  39 * itself, to shrink a bio data allocation from two mempool calls to one
  40 */
  41#define BIO_INLINE_VECS         4
  42
  43/*
  44 * if you change this list, also change bvec_alloc or things will
  45 * break badly! cannot be bigger than what you can fit into an
  46 * unsigned short
  47 */
  48#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  49static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  50        BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  51};
  52#undef BV
  53
  54/*
  55 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  56 * IO code that does not need private memory pools.
  57 */
  58struct bio_set fs_bio_set;
  59EXPORT_SYMBOL(fs_bio_set);
  60
  61/*
  62 * Our slab pool management
  63 */
  64struct bio_slab {
  65        struct kmem_cache *slab;
  66        unsigned int slab_ref;
  67        unsigned int slab_size;
  68        char name[8];
  69};
  70static DEFINE_MUTEX(bio_slab_lock);
  71static struct bio_slab *bio_slabs;
  72static unsigned int bio_slab_nr, bio_slab_max;
  73
  74static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  75{
  76        unsigned int sz = sizeof(struct bio) + extra_size;
  77        struct kmem_cache *slab = NULL;
  78        struct bio_slab *bslab, *new_bio_slabs;
  79        unsigned int new_bio_slab_max;
  80        unsigned int i, entry = -1;
  81
  82        mutex_lock(&bio_slab_lock);
  83
  84        i = 0;
  85        while (i < bio_slab_nr) {
  86                bslab = &bio_slabs[i];
  87
  88                if (!bslab->slab && entry == -1)
  89                        entry = i;
  90                else if (bslab->slab_size == sz) {
  91                        slab = bslab->slab;
  92                        bslab->slab_ref++;
  93                        break;
  94                }
  95                i++;
  96        }
  97
  98        if (slab)
  99                goto out_unlock;
 100
 101        if (bio_slab_nr == bio_slab_max && entry == -1) {
 102                new_bio_slab_max = bio_slab_max << 1;
 103                new_bio_slabs = krealloc(bio_slabs,
 104                                         new_bio_slab_max * sizeof(struct bio_slab),
 105                                         GFP_KERNEL);
 106                if (!new_bio_slabs)
 107                        goto out_unlock;
 108                bio_slab_max = new_bio_slab_max;
 109                bio_slabs = new_bio_slabs;
 110        }
 111        if (entry == -1)
 112                entry = bio_slab_nr++;
 113
 114        bslab = &bio_slabs[entry];
 115
 116        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
 117        slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
 118                                 SLAB_HWCACHE_ALIGN, NULL);
 119        if (!slab)
 120                goto out_unlock;
 121
 122        bslab->slab = slab;
 123        bslab->slab_ref = 1;
 124        bslab->slab_size = sz;
 125out_unlock:
 126        mutex_unlock(&bio_slab_lock);
 127        return slab;
 128}
 129
 130static void bio_put_slab(struct bio_set *bs)
 131{
 132        struct bio_slab *bslab = NULL;
 133        unsigned int i;
 134
 135        mutex_lock(&bio_slab_lock);
 136
 137        for (i = 0; i < bio_slab_nr; i++) {
 138                if (bs->bio_slab == bio_slabs[i].slab) {
 139                        bslab = &bio_slabs[i];
 140                        break;
 141                }
 142        }
 143
 144        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 145                goto out;
 146
 147        WARN_ON(!bslab->slab_ref);
 148
 149        if (--bslab->slab_ref)
 150                goto out;
 151
 152        kmem_cache_destroy(bslab->slab);
 153        bslab->slab = NULL;
 154
 155out:
 156        mutex_unlock(&bio_slab_lock);
 157}
 158
 159unsigned int bvec_nr_vecs(unsigned short idx)
 160{
 161        return bvec_slabs[--idx].nr_vecs;
 162}
 163
 164void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 165{
 166        if (!idx)
 167                return;
 168        idx--;
 169
 170        BIO_BUG_ON(idx >= BVEC_POOL_NR);
 171
 172        if (idx == BVEC_POOL_MAX) {
 173                mempool_free(bv, pool);
 174        } else {
 175                struct biovec_slab *bvs = bvec_slabs + idx;
 176
 177                kmem_cache_free(bvs->slab, bv);
 178        }
 179}
 180
 181struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 182                           mempool_t *pool)
 183{
 184        struct bio_vec *bvl;
 185
 186        /*
 187         * see comment near bvec_array define!
 188         */
 189        switch (nr) {
 190        case 1:
 191                *idx = 0;
 192                break;
 193        case 2 ... 4:
 194                *idx = 1;
 195                break;
 196        case 5 ... 16:
 197                *idx = 2;
 198                break;
 199        case 17 ... 64:
 200                *idx = 3;
 201                break;
 202        case 65 ... 128:
 203                *idx = 4;
 204                break;
 205        case 129 ... BIO_MAX_PAGES:
 206                *idx = 5;
 207                break;
 208        default:
 209                return NULL;
 210        }
 211
 212        /*
 213         * idx now points to the pool we want to allocate from. only the
 214         * 1-vec entry pool is mempool backed.
 215         */
 216        if (*idx == BVEC_POOL_MAX) {
 217fallback:
 218                bvl = mempool_alloc(pool, gfp_mask);
 219        } else {
 220                struct biovec_slab *bvs = bvec_slabs + *idx;
 221                gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 222
 223                /*
 224                 * Make this allocation restricted and don't dump info on
 225                 * allocation failures, since we'll fallback to the mempool
 226                 * in case of failure.
 227                 */
 228                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 229
 230                /*
 231                 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
 232                 * is set, retry with the 1-entry mempool
 233                 */
 234                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
 235                if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 236                        *idx = BVEC_POOL_MAX;
 237                        goto fallback;
 238                }
 239        }
 240
 241        (*idx)++;
 242        return bvl;
 243}
 244
 245void bio_uninit(struct bio *bio)
 246{
 247        bio_disassociate_task(bio);
 248}
 249EXPORT_SYMBOL(bio_uninit);
 250
 251static void bio_free(struct bio *bio)
 252{
 253        struct bio_set *bs = bio->bi_pool;
 254        void *p;
 255
 256        bio_uninit(bio);
 257
 258        if (bs) {
 259                bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
 260
 261                /*
 262                 * If we have front padding, adjust the bio pointer before freeing
 263                 */
 264                p = bio;
 265                p -= bs->front_pad;
 266
 267                mempool_free(p, &bs->bio_pool);
 268        } else {
 269                /* Bio was allocated by bio_kmalloc() */
 270                kfree(bio);
 271        }
 272}
 273
 274/*
 275 * Users of this function have their own bio allocation. Subsequently,
 276 * they must remember to pair any call to bio_init() with bio_uninit()
 277 * when IO has completed, or when the bio is released.
 278 */
 279void bio_init(struct bio *bio, struct bio_vec *table,
 280              unsigned short max_vecs)
 281{
 282        memset(bio, 0, sizeof(*bio));
 283        atomic_set(&bio->__bi_remaining, 1);
 284        atomic_set(&bio->__bi_cnt, 1);
 285
 286        bio->bi_io_vec = table;
 287        bio->bi_max_vecs = max_vecs;
 288}
 289EXPORT_SYMBOL(bio_init);
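
/*
 * A minimal sketch of the pairing described above bio_init(): a caller that
 * embeds a bio (plus a small inline biovec table) in its own structure
 * initializes it with bio_init() and pairs that with bio_uninit() once the
 * IO has completed.  The structure and function names here are hypothetical,
 * and the block is compiled out; it is for illustration only.
 */
#if 0
struct example_request {
        struct bio              bio;
        struct bio_vec          inline_vecs[4];
};

static void example_request_init(struct example_request *req)
{
        /* Hand the embedded bio its caller-owned biovec table. */
        bio_init(&req->bio, req->inline_vecs, ARRAY_SIZE(req->inline_vecs));
}

static void example_request_done(struct example_request *req)
{
        /* Must be paired with the bio_init() above. */
        bio_uninit(&req->bio);
}
#endif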
 290
 291/**
 292 * bio_reset - reinitialize a bio
 293 * @bio:        bio to reset
 294 *
 295 * Description:
 296 *   After calling bio_reset(), @bio will be in the same state as a freshly
 297 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 298 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 299 *   comment in struct bio.
 300 */
 301void bio_reset(struct bio *bio)
 302{
 303        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 304
 305        bio_uninit(bio);
 306
 307        memset(bio, 0, BIO_RESET_BYTES);
 308        bio->bi_flags = flags;
 309        atomic_set(&bio->__bi_remaining, 1);
 310}
 311EXPORT_SYMBOL(bio_reset);
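
/*
 * Illustrative sketch of recycling a caller-owned bio with bio_reset().
 * Everything below BIO_RESET_BYTES is cleared, so the device, operation,
 * iterator and pages have to be set up again before each resubmission.
 * The function name and parameters are assumptions for the sketch, and the
 * block is compiled out.
 */
#if 0
static int example_rewrite_page(struct bio *bio, struct block_device *bdev,
                                struct page *page, sector_t sector)
{
        bio_reset(bio);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        return submit_bio_wait(bio);
}
#endif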
 312
 313static struct bio *__bio_chain_endio(struct bio *bio)
 314{
 315        struct bio *parent = bio->bi_private;
 316
 317        if (!parent->bi_status)
 318                parent->bi_status = bio->bi_status;
 319        bio_put(bio);
 320        return parent;
 321}
 322
 323static void bio_chain_endio(struct bio *bio)
 324{
 325        bio_endio(__bio_chain_endio(bio));
 326}
 327
 328/**
 329 * bio_chain - chain bio completions
 330 * @bio: the target bio
 331 * @parent: the @bio's parent bio
 332 *
 333 * The caller won't have a bi_end_io called when @bio completes - instead,
 334 * @parent's bi_end_io won't be called until both @parent and @bio have
 335 * completed; the chained bio will also be freed when it completes.
 336 *
 337 * The caller must not set bi_private or bi_end_io in @bio.
 338 */
 339void bio_chain(struct bio *bio, struct bio *parent)
 340{
 341        BUG_ON(bio->bi_private || bio->bi_end_io);
 342
 343        bio->bi_private = parent;
 344        bio->bi_end_io  = bio_chain_endio;
 345        bio_inc_remaining(parent);
 346}
 347EXPORT_SYMBOL(bio_chain);
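
/*
 * Rough sketch of how stacking drivers commonly use bio_chain() together
 * with bio_split(): the front part is cloned, chained to the remainder, and
 * both halves are handed to generic_make_request(), so the original bio's
 * completion fires only once both halves are done.  The function name is
 * hypothetical; the block is compiled out.
 */
#if 0
static void example_split_and_chain(struct bio *bio, unsigned int sectors,
                                    struct bio_set *bs)
{
        struct bio *split;

        /* Clone the first @sectors and advance @bio past them. */
        split = bio_split(bio, sectors, GFP_NOIO, bs);
        if (!split)
                return;

        /* @split has no bi_end_io/bi_private yet, so it may be chained. */
        bio_chain(split, bio);
        generic_make_request(split);
        generic_make_request(bio);
}
#endif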
 348
 349static void bio_alloc_rescue(struct work_struct *work)
 350{
 351        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 352        struct bio *bio;
 353
 354        while (1) {
 355                spin_lock(&bs->rescue_lock);
 356                bio = bio_list_pop(&bs->rescue_list);
 357                spin_unlock(&bs->rescue_lock);
 358
 359                if (!bio)
 360                        break;
 361
 362                generic_make_request(bio);
 363        }
 364}
 365
 366static void punt_bios_to_rescuer(struct bio_set *bs)
 367{
 368        struct bio_list punt, nopunt;
 369        struct bio *bio;
 370
 371        if (WARN_ON_ONCE(!bs->rescue_workqueue))
 372                return;
 373        /*
 374         * In order to guarantee forward progress we must punt only bios that
 375         * were allocated from this bio_set; otherwise, if there was a bio on
 376         * there for a stacking driver higher up in the stack, processing it
 377         * could require allocating bios from this bio_set, and doing that from
 378         * our own rescuer would be bad.
 379         *
 380         * Since bio lists are singly linked, pop them all instead of trying to
 381         * remove from the middle of the list:
 382         */
 383
 384        bio_list_init(&punt);
 385        bio_list_init(&nopunt);
 386
 387        while ((bio = bio_list_pop(&current->bio_list[0])))
 388                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 389        current->bio_list[0] = nopunt;
 390
 391        bio_list_init(&nopunt);
 392        while ((bio = bio_list_pop(&current->bio_list[1])))
 393                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 394        current->bio_list[1] = nopunt;
 395
 396        spin_lock(&bs->rescue_lock);
 397        bio_list_merge(&bs->rescue_list, &punt);
 398        spin_unlock(&bs->rescue_lock);
 399
 400        queue_work(bs->rescue_workqueue, &bs->rescue_work);
 401}
 402
 403/**
 404 * bio_alloc_bioset - allocate a bio for I/O
 405 * @gfp_mask:   the GFP_* mask given to the slab allocator
 406 * @nr_iovecs:  number of iovecs to pre-allocate
 407 * @bs:         the bio_set to allocate from.
 408 *
 409 * Description:
 410 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 411 *   backed by the @bs's mempool.
 412 *
 413 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 414 *   always be able to allocate a bio. This is due to the mempool guarantees.
 415 *   To make this work, callers must never allocate more than 1 bio at a time
 416 *   from this pool. Callers that need to allocate more than 1 bio must always
 417 *   submit the previously allocated bio for IO before attempting to allocate
 418 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 419 *
 420 *   Note that when running under generic_make_request() (i.e. any block
 421 *   driver), bios are not submitted until after you return - see the code in
 422 *   generic_make_request() that converts recursion into iteration, to prevent
 423 *   stack overflows.
 424 *
 425 *   This would normally mean allocating multiple bios under
 426 *   generic_make_request() would be susceptible to deadlocks, but we have
 427 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 428 *   thread.
 429 *
 430 *   However, we do not guarantee forward progress for allocations from other
 431 *   mempools. Doing multiple allocations from the same mempool under
 432 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 433 *   for per bio allocations.
 434 *
 435 *   RETURNS:
 436 *   Pointer to new bio on success, NULL on failure.
 437 */
 438struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 439                             struct bio_set *bs)
 440{
 441        gfp_t saved_gfp = gfp_mask;
 442        unsigned front_pad;
 443        unsigned inline_vecs;
 444        struct bio_vec *bvl = NULL;
 445        struct bio *bio;
 446        void *p;
 447
 448        if (!bs) {
 449                if (nr_iovecs > UIO_MAXIOV)
 450                        return NULL;
 451
 452                p = kmalloc(sizeof(struct bio) +
 453                            nr_iovecs * sizeof(struct bio_vec),
 454                            gfp_mask);
 455                front_pad = 0;
 456                inline_vecs = nr_iovecs;
 457        } else {
 458                /* should not use nobvec bioset for nr_iovecs > 0 */
 459                if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
 460                                 nr_iovecs > 0))
 461                        return NULL;
 462                /*
 463                 * generic_make_request() converts recursion to iteration; this
 464                 * means if we're running beneath it, any bios we allocate and
 465                 * submit will not be submitted (and thus freed) until after we
 466                 * return.
 467                 *
 468                 * This exposes us to a potential deadlock if we allocate
 469                 * multiple bios from the same bio_set() while running
 470                 * underneath generic_make_request(). If we were to allocate
 471                 * multiple bios (say a stacking block driver that was splitting
 472                 * bios), we would deadlock if we exhausted the mempool's
 473                 * reserve.
 474                 *
 475                 * We solve this, and guarantee forward progress, with a rescuer
 476                 * workqueue per bio_set. If we go to allocate and there are
 477                 * bios on current->bio_list, we first try the allocation
 478                 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
 479                 * bios we would be blocking to the rescuer workqueue before
 480                 * we retry with the original gfp_flags.
 481                 */
 482
 483                if (current->bio_list &&
 484                    (!bio_list_empty(&current->bio_list[0]) ||
 485                     !bio_list_empty(&current->bio_list[1])) &&
 486                    bs->rescue_workqueue)
 487                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 488
 489                p = mempool_alloc(&bs->bio_pool, gfp_mask);
 490                if (!p && gfp_mask != saved_gfp) {
 491                        punt_bios_to_rescuer(bs);
 492                        gfp_mask = saved_gfp;
 493                        p = mempool_alloc(&bs->bio_pool, gfp_mask);
 494                }
 495
 496                front_pad = bs->front_pad;
 497                inline_vecs = BIO_INLINE_VECS;
 498        }
 499
 500        if (unlikely(!p))
 501                return NULL;
 502
 503        bio = p + front_pad;
 504        bio_init(bio, NULL, 0);
 505
 506        if (nr_iovecs > inline_vecs) {
 507                unsigned long idx = 0;
 508
 509                bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 510                if (!bvl && gfp_mask != saved_gfp) {
 511                        punt_bios_to_rescuer(bs);
 512                        gfp_mask = saved_gfp;
 513                        bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 514                }
 515
 516                if (unlikely(!bvl))
 517                        goto err_free;
 518
 519                bio->bi_flags |= idx << BVEC_POOL_OFFSET;
 520        } else if (nr_iovecs) {
 521                bvl = bio->bi_inline_vecs;
 522        }
 523
 524        bio->bi_pool = bs;
 525        bio->bi_max_vecs = nr_iovecs;
 526        bio->bi_io_vec = bvl;
 527        return bio;
 528
 529err_free:
 530        mempool_free(p, &bs->bio_pool);
 531        return NULL;
 532}
 533EXPORT_SYMBOL(bio_alloc_bioset);
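
/*
 * A small sketch of the allocation rule documented above: when allocating
 * from a mempool-backed bio_set with __GFP_DIRECT_RECLAIM, submit each bio
 * before allocating the next one so the mempool guarantee holds.  The
 * example_init_and_fill() helper is hypothetical; the block is compiled out.
 */
#if 0
static void example_submit_many(struct bio_set *bs, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                struct bio *bio;

                /*
                 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot
                 * fail - but only because the bio from the previous
                 * iteration has already been submitted and will eventually
                 * be freed back into @bs.
                 */
                bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
                example_init_and_fill(bio, i);  /* hypothetical helper */
                submit_bio(bio);
        }
}
#endif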
 534
 535void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 536{
 537        unsigned long flags;
 538        struct bio_vec bv;
 539        struct bvec_iter iter;
 540
 541        __bio_for_each_segment(bv, bio, iter, start) {
 542                char *data = bvec_kmap_irq(&bv, &flags);
 543                memset(data, 0, bv.bv_len);
 544                flush_dcache_page(bv.bv_page);
 545                bvec_kunmap_irq(data, &flags);
 546        }
 547}
 548EXPORT_SYMBOL(zero_fill_bio_iter);
 549
 550/**
 551 * bio_put - release a reference to a bio
 552 * @bio:   bio to release reference to
 553 *
 554 * Description:
 555 *   Put a reference to a &struct bio, either one you have gotten with
 556 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 557 **/
 558void bio_put(struct bio *bio)
 559{
 560        if (!bio_flagged(bio, BIO_REFFED))
 561                bio_free(bio);
 562        else {
 563                BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
 564
 565                /*
 566                 * last put frees it
 567                 */
 568                if (atomic_dec_and_test(&bio->__bi_cnt))
 569                        bio_free(bio);
 570        }
 571}
 572EXPORT_SYMBOL(bio_put);
 573
 574inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 575{
 576        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 577                blk_recount_segments(q, bio);
 578
 579        return bio->bi_phys_segments;
 580}
 581EXPORT_SYMBOL(bio_phys_segments);
 582
 583/**
 584 *      __bio_clone_fast - clone a bio that shares the original bio's biovec
 585 *      @bio: destination bio
 586 *      @bio_src: bio to clone
 587 *
 588 *      Clone a &bio. Caller will own @bio, but not
 589 *      the actual data it points to. Reference count of @bio
 590 *      will be one.
 591 *
 592 *      Caller must ensure that @bio_src is not freed before @bio.
 593 */
 594void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 595{
 596        BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 597
 598        /*
 599         * most users will be overriding ->bi_disk with a new target,
 600         * so we don't set nor calculate new physical/hw segment counts here
 601         */
 602        bio->bi_disk = bio_src->bi_disk;
 603        bio->bi_partno = bio_src->bi_partno;
 604        bio_set_flag(bio, BIO_CLONED);
 605        if (bio_flagged(bio_src, BIO_THROTTLED))
 606                bio_set_flag(bio, BIO_THROTTLED);
 607        bio->bi_opf = bio_src->bi_opf;
 608        bio->bi_ioprio = bio_src->bi_ioprio;
 609        bio->bi_write_hint = bio_src->bi_write_hint;
 610        bio->bi_iter = bio_src->bi_iter;
 611        bio->bi_io_vec = bio_src->bi_io_vec;
 612
 613        bio_clone_blkcg_association(bio, bio_src);
 614}
 615EXPORT_SYMBOL(__bio_clone_fast);
 616
 617/**
 618 *      bio_clone_fast - clone a bio that shares the original bio's biovec
 619 *      @bio: bio to clone
 620 *      @gfp_mask: allocation priority
 621 *      @bs: bio_set to allocate from
 622 *
 623 *      Like __bio_clone_fast, only also allocates the returned bio
 624 */
 625struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 626{
 627        struct bio *b;
 628
 629        b = bio_alloc_bioset(gfp_mask, 0, bs);
 630        if (!b)
 631                return NULL;
 632
 633        __bio_clone_fast(b, bio);
 634
 635        if (bio_integrity(bio)) {
 636                int ret;
 637
 638                ret = bio_integrity_clone(b, bio, gfp_mask);
 639
 640                if (ret < 0) {
 641                        bio_put(b);
 642                        return NULL;
 643                }
 644        }
 645
 646        return b;
 647}
 648EXPORT_SYMBOL(bio_clone_fast);
 649
 650/**
 651 *      bio_add_pc_page -       attempt to add page to bio
 652 *      @q: the target queue
 653 *      @bio: destination bio
 654 *      @page: page to add
 655 *      @len: vec entry length
 656 *      @offset: vec entry offset
 657 *
 658 *      Attempt to add a page to the bio_vec maplist. This can fail for a
 659 *      number of reasons, such as the bio being full or target block device
 660 *      limitations. The target block device must allow bios up to PAGE_SIZE,
 661 *      so it is always possible to add a single page to an empty bio.
 662 *
 663 *      This should only be used by REQ_PC bios.
 664 */
 665int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 666                    *page, unsigned int len, unsigned int offset)
 667{
 668        int retried_segments = 0;
 669        struct bio_vec *bvec;
 670
 671        /*
 672         * cloned bio must not modify vec list
 673         */
 674        if (unlikely(bio_flagged(bio, BIO_CLONED)))
 675                return 0;
 676
 677        if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 678                return 0;
 679
 680        /*
 681         * For filesystems with a blocksize smaller than the pagesize
 682         * we will often be called with the same page as last time and
 683         * a consecutive offset.  Optimize this special case.
 684         */
 685        if (bio->bi_vcnt > 0) {
 686                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
 687
 688                if (page == prev->bv_page &&
 689                    offset == prev->bv_offset + prev->bv_len) {
 690                        prev->bv_len += len;
 691                        bio->bi_iter.bi_size += len;
 692                        goto done;
 693                }
 694
 695                /*
 696                 * If the queue doesn't support SG gaps and adding this
 697                 * offset would create a gap, disallow it.
 698                 */
 699                if (bvec_gap_to_prev(q, prev, offset))
 700                        return 0;
 701        }
 702
 703        if (bio_full(bio))
 704                return 0;
 705
 706        /*
 707         * setup the new entry, we might clear it again later if we
 708         * cannot add the page
 709         */
 710        bvec = &bio->bi_io_vec[bio->bi_vcnt];
 711        bvec->bv_page = page;
 712        bvec->bv_len = len;
 713        bvec->bv_offset = offset;
 714        bio->bi_vcnt++;
 715        bio->bi_phys_segments++;
 716        bio->bi_iter.bi_size += len;
 717
 718        /*
 719         * Perform a recount if the number of segments is greater
 720         * than queue_max_segments(q).
 721         */
 722
 723        while (bio->bi_phys_segments > queue_max_segments(q)) {
 724
 725                if (retried_segments)
 726                        goto failed;
 727
 728                retried_segments = 1;
 729                blk_recount_segments(q, bio);
 730        }
 731
 732        /* If we may be able to merge these biovecs, force a recount */
 733        if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
 734                bio_clear_flag(bio, BIO_SEG_VALID);
 735
 736 done:
 737        return len;
 738
 739 failed:
 740        bvec->bv_page = NULL;
 741        bvec->bv_len = 0;
 742        bvec->bv_offset = 0;
 743        bio->bi_vcnt--;
 744        bio->bi_iter.bi_size -= len;
 745        blk_recount_segments(q, bio);
 746        return 0;
 747}
 748EXPORT_SYMBOL(bio_add_pc_page);
 749
 750/**
 751 * __bio_try_merge_page - try appending data to an existing bvec.
 752 * @bio: destination bio
 753 * @page: page to add
 754 * @len: length of the data to add
 755 * @off: offset of the data in @page
 756 *
 757 * Try to add the data at @page + @off to the last bvec of @bio.  This is
 758 * a useful optimisation for file systems with a block size smaller than the
 759 * page size.
 760 *
 761 * Return %true on success or %false on failure.
 762 */
 763bool __bio_try_merge_page(struct bio *bio, struct page *page,
 764                unsigned int len, unsigned int off)
 765{
 766        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 767                return false;
 768
 769        if (bio->bi_vcnt > 0) {
 770                struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 771
 772                if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
 773                        bv->bv_len += len;
 774                        bio->bi_iter.bi_size += len;
 775                        return true;
 776                }
 777        }
 778        return false;
 779}
 780EXPORT_SYMBOL_GPL(__bio_try_merge_page);
 781
 782/**
 783 * __bio_add_page - add page to a bio in a new segment
 784 * @bio: destination bio
 785 * @page: page to add
 786 * @len: length of the data to add
 787 * @off: offset of the data in @page
 788 *
 789 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 790 * that @bio has space for another bvec.
 791 */
 792void __bio_add_page(struct bio *bio, struct page *page,
 793                unsigned int len, unsigned int off)
 794{
 795        struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
 796
 797        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
 798        WARN_ON_ONCE(bio_full(bio));
 799
 800        bv->bv_page = page;
 801        bv->bv_offset = off;
 802        bv->bv_len = len;
 803
 804        bio->bi_iter.bi_size += len;
 805        bio->bi_vcnt++;
 806}
 807EXPORT_SYMBOL_GPL(__bio_add_page);
 808
 809/**
 810 *      bio_add_page    -       attempt to add page to bio
 811 *      @bio: destination bio
 812 *      @page: page to add
 813 *      @len: vec entry length
 814 *      @offset: vec entry offset
 815 *
 816 *      Attempt to add a page to the bio_vec maplist. This will only fail
 817 *      if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 818 */
 819int bio_add_page(struct bio *bio, struct page *page,
 820                 unsigned int len, unsigned int offset)
 821{
 822        if (!__bio_try_merge_page(bio, page, len, offset)) {
 823                if (bio_full(bio))
 824                        return 0;
 825                __bio_add_page(bio, page, len, offset);
 826        }
 827        return len;
 828}
 829EXPORT_SYMBOL(bio_add_page);
 830
 831#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 832
 833/**
 834 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 835 * @bio: bio to add pages to
 836 * @iter: iov iterator describing the region to be mapped
 837 *
 838 * Pins pages from *iter and appends them to @bio's bvec array. The
 839 * pages will have to be released using put_page() when done.
 840 * For multi-segment *iter, this function only adds pages from the
 841 * next non-empty segment of the iov iterator.
 842 */
 843static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 844{
 845        unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
 846        unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
 847        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 848        struct page **pages = (struct page **)bv;
 849        ssize_t size, left;
 850        unsigned len, i;
 851        size_t offset;
 852
 853        /*
 854         * Move page array up in the allocated memory for the bio vecs as far as
 855         * possible so that we can start filling biovecs from the beginning
 856         * without overwriting the temporary page array.
 857        */
 858        BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
 859        pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 860
 861        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 862        if (unlikely(size <= 0))
 863                return size ? size : -EFAULT;
 864
 865        for (left = size, i = 0; left > 0; left -= len, i++) {
 866                struct page *page = pages[i];
 867
 868                len = min_t(size_t, PAGE_SIZE - offset, left);
 869                if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
 870                        return -EINVAL;
 871                offset = 0;
 872        }
 873
 874        iov_iter_advance(iter, size);
 875        return 0;
 876}
 877
 878/**
 879 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 880 * @bio: bio to add pages to
 881 * @iter: iov iterator describing the region to be mapped
 882 *
 883 * Pins pages from *iter and appends them to @bio's bvec array. The
 884 * pages will have to be released using put_page() when done.
 885 * The function tries, but does not guarantee, to pin as many pages as
 886 * fit into the bio, or are requested in *iter, whatever is smaller.
 887 * If MM encounters an error pinning the requested pages, it stops.
 888 * Error is returned only if 0 pages could be pinned.
 889 */
 890int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 891{
 892        unsigned short orig_vcnt = bio->bi_vcnt;
 893
 894        do {
 895                int ret = __bio_iov_iter_get_pages(bio, iter);
 896
 897                if (unlikely(ret))
 898                        return bio->bi_vcnt > orig_vcnt ? 0 : ret;
 899
 900        } while (iov_iter_count(iter) && !bio_full(bio));
 901
 902        return 0;
 903}
 904EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 905
 906static void submit_bio_wait_endio(struct bio *bio)
 907{
 908        complete(bio->bi_private);
 909}
 910
 911/**
 912 * submit_bio_wait - submit a bio, and wait until it completes
 913 * @bio: The &struct bio which describes the I/O
 914 *
 915 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 916 * bio_endio() on failure.
 917 *
 918 * WARNING: Unlike how submit_bio() is usually used, this function does not
 919 * result in the bio reference being consumed. The caller must drop the
 920 * reference on its own.
 921 */
 922int submit_bio_wait(struct bio *bio)
 923{
 924        DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
 925
 926        bio->bi_private = &done;
 927        bio->bi_end_io = submit_bio_wait_endio;
 928        bio->bi_opf |= REQ_SYNC;
 929        submit_bio(bio);
 930        wait_for_completion_io(&done);
 931
 932        return blk_status_to_errno(bio->bi_status);
 933}
 934EXPORT_SYMBOL(submit_bio_wait);
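
/*
 * Illustrative sketch of a synchronous, single-page read built on
 * submit_bio_wait().  As noted above, the bio reference is not consumed,
 * so the caller drops it with bio_put().  Names are hypothetical and the
 * block is compiled out.
 */
#if 0
static int example_read_page(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        struct bio *bio;
        int ret;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
#endif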
 935
 936/**
 937 * bio_advance - increment/complete a bio by some number of bytes
 938 * @bio:        bio to advance
 939 * @bytes:      number of bytes to complete
 940 *
 941 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 942 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 943 * be updated on the last bvec as well.
 944 *
 945 * @bio will then represent the remaining, uncompleted portion of the io.
 946 */
 947void bio_advance(struct bio *bio, unsigned bytes)
 948{
 949        if (bio_integrity(bio))
 950                bio_integrity_advance(bio, bytes);
 951
 952        bio_advance_iter(bio, &bio->bi_iter, bytes);
 953}
 954EXPORT_SYMBOL(bio_advance);
 955
 956void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
 957                        struct bio *src, struct bvec_iter *src_iter)
 958{
 959        struct bio_vec src_bv, dst_bv;
 960        void *src_p, *dst_p;
 961        unsigned bytes;
 962
 963        while (src_iter->bi_size && dst_iter->bi_size) {
 964                src_bv = bio_iter_iovec(src, *src_iter);
 965                dst_bv = bio_iter_iovec(dst, *dst_iter);
 966
 967                bytes = min(src_bv.bv_len, dst_bv.bv_len);
 968
 969                src_p = kmap_atomic(src_bv.bv_page);
 970                dst_p = kmap_atomic(dst_bv.bv_page);
 971
 972                memcpy(dst_p + dst_bv.bv_offset,
 973                       src_p + src_bv.bv_offset,
 974                       bytes);
 975
 976                kunmap_atomic(dst_p);
 977                kunmap_atomic(src_p);
 978
 979                flush_dcache_page(dst_bv.bv_page);
 980
 981                bio_advance_iter(src, src_iter, bytes);
 982                bio_advance_iter(dst, dst_iter, bytes);
 983        }
 984}
 985EXPORT_SYMBOL(bio_copy_data_iter);
 986
 987/**
 988 * bio_copy_data - copy contents of data buffers from one bio to another
 989 * @src: source bio
 990 * @dst: destination bio
 991 *
 992 * Stops when it reaches the end of either @src or @dst - that is, copies
 993 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 994 */
 995void bio_copy_data(struct bio *dst, struct bio *src)
 996{
 997        struct bvec_iter src_iter = src->bi_iter;
 998        struct bvec_iter dst_iter = dst->bi_iter;
 999
1000        bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1001}
1002EXPORT_SYMBOL(bio_copy_data);
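
/*
 * Sketch of a typical bio_copy_data() user: a bounce bio's completion
 * handler copying the read payload back into the original bio before
 * completing it, roughly the pattern used by bounce buffering.  The
 * function name is hypothetical and the block is compiled out.
 */
#if 0
static void example_bounce_end_io(struct bio *bounce)
{
        struct bio *orig = bounce->bi_private;

        orig->bi_status = bounce->bi_status;

        /* For a successful read, copy the bounced payload back. */
        if (!bounce->bi_status && bio_data_dir(orig) == READ)
                bio_copy_data(orig, bounce);

        bio_free_pages(bounce);
        bio_put(bounce);
        bio_endio(orig);
}
#endif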
1003
1004/**
1005 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1006 * another
1007 * @src: source bio list
1008 * @dst: destination bio list
1009 *
1010 * Stops when it reaches the end of either the @src list or @dst list - that is,
1011 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1012 * bios).
1013 */
1014void bio_list_copy_data(struct bio *dst, struct bio *src)
1015{
1016        struct bvec_iter src_iter = src->bi_iter;
1017        struct bvec_iter dst_iter = dst->bi_iter;
1018
1019        while (1) {
1020                if (!src_iter.bi_size) {
1021                        src = src->bi_next;
1022                        if (!src)
1023                                break;
1024
1025                        src_iter = src->bi_iter;
1026                }
1027
1028                if (!dst_iter.bi_size) {
1029                        dst = dst->bi_next;
1030                        if (!dst)
1031                                break;
1032
1033                        dst_iter = dst->bi_iter;
1034                }
1035
1036                bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1037        }
1038}
1039EXPORT_SYMBOL(bio_list_copy_data);
1040
1041struct bio_map_data {
1042        int is_our_pages;
1043        struct iov_iter iter;
1044        struct iovec iov[];
1045};
1046
1047static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1048                                               gfp_t gfp_mask)
1049{
1050        struct bio_map_data *bmd;
1051        if (data->nr_segs > UIO_MAXIOV)
1052                return NULL;
1053
1054        bmd = kmalloc(sizeof(struct bio_map_data) +
1055                       sizeof(struct iovec) * data->nr_segs, gfp_mask);
1056        if (!bmd)
1057                return NULL;
1058        memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1059        bmd->iter = *data;
1060        bmd->iter.iov = bmd->iov;
1061        return bmd;
1062}
1063
1064/**
1065 * bio_copy_from_iter - copy all pages from iov_iter to bio
1066 * @bio: The &struct bio which describes the I/O as destination
1067 * @iter: iov_iter as source
1068 *
1069 * Copy all pages from iov_iter to bio.
1070 * Returns 0 on success, or error on failure.
1071 */
1072static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1073{
1074        int i;
1075        struct bio_vec *bvec;
1076
1077        bio_for_each_segment_all(bvec, bio, i) {
1078                ssize_t ret;
1079
1080                ret = copy_page_from_iter(bvec->bv_page,
1081                                          bvec->bv_offset,
1082                                          bvec->bv_len,
1083                                          iter);
1084
1085                if (!iov_iter_count(iter))
1086                        break;
1087
1088                if (ret < bvec->bv_len)
1089                        return -EFAULT;
1090        }
1091
1092        return 0;
1093}
1094
1095/**
1096 * bio_copy_to_iter - copy all pages from bio to iov_iter
1097 * @bio: The &struct bio which describes the I/O as source
1098 * @iter: iov_iter as destination
1099 *
1100 * Copy all pages from bio to iov_iter.
1101 * Returns 0 on success, or error on failure.
1102 */
1103static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1104{
1105        int i;
1106        struct bio_vec *bvec;
1107
1108        bio_for_each_segment_all(bvec, bio, i) {
1109                ssize_t ret;
1110
1111                ret = copy_page_to_iter(bvec->bv_page,
1112                                        bvec->bv_offset,
1113                                        bvec->bv_len,
1114                                        &iter);
1115
1116                if (!iov_iter_count(&iter))
1117                        break;
1118
1119                if (ret < bvec->bv_len)
1120                        return -EFAULT;
1121        }
1122
1123        return 0;
1124}
1125
1126void bio_free_pages(struct bio *bio)
1127{
1128        struct bio_vec *bvec;
1129        int i;
1130
1131        bio_for_each_segment_all(bvec, bio, i)
1132                __free_page(bvec->bv_page);
1133}
1134EXPORT_SYMBOL(bio_free_pages);
1135
1136/**
1137 *      bio_uncopy_user -       finish previously mapped bio
1138 *      @bio: bio being terminated
1139 *
1140 *      Free pages allocated from bio_copy_user_iov() and write back data
1141 *      to user space in case of a read.
1142 */
1143int bio_uncopy_user(struct bio *bio)
1144{
1145        struct bio_map_data *bmd = bio->bi_private;
1146        int ret = 0;
1147
1148        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1149                /*
1150                 * if we're in a workqueue, the request is orphaned, so
1151                 * don't copy into a random user address space, just free
1152                 * and return -EINTR so user space doesn't expect any data.
1153                 */
1154                if (!current->mm)
1155                        ret = -EINTR;
1156                else if (bio_data_dir(bio) == READ)
1157                        ret = bio_copy_to_iter(bio, bmd->iter);
1158                if (bmd->is_our_pages)
1159                        bio_free_pages(bio);
1160        }
1161        kfree(bmd);
1162        bio_put(bio);
1163        return ret;
1164}
1165
1166/**
1167 *      bio_copy_user_iov       -       copy user data to bio
1168 *      @q:             destination block queue
1169 *      @map_data:      pointer to the rq_map_data holding pages (if necessary)
1170 *      @iter:          iovec iterator
1171 *      @gfp_mask:      memory allocation flags
1172 *
1173 *      Prepares and returns a bio for indirect user io, bouncing data
1174 *      to/from kernel pages as necessary. Must be paired with a call to
1175 *      bio_uncopy_user() on io completion.
1176 */
1177struct bio *bio_copy_user_iov(struct request_queue *q,
1178                              struct rq_map_data *map_data,
1179                              struct iov_iter *iter,
1180                              gfp_t gfp_mask)
1181{
1182        struct bio_map_data *bmd;
1183        struct page *page;
1184        struct bio *bio;
1185        int i = 0, ret;
1186        int nr_pages;
1187        unsigned int len = iter->count;
1188        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1189
1190        bmd = bio_alloc_map_data(iter, gfp_mask);
1191        if (!bmd)
1192                return ERR_PTR(-ENOMEM);
1193
1194        /*
1195         * We need to do a deep copy of the iov_iter including the iovecs.
1196         * The caller provided iov might point to an on-stack or otherwise
1197         * shortlived one.
1198         */
1199        bmd->is_our_pages = map_data ? 0 : 1;
1200
1201        nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1202        if (nr_pages > BIO_MAX_PAGES)
1203                nr_pages = BIO_MAX_PAGES;
1204
1205        ret = -ENOMEM;
1206        bio = bio_kmalloc(gfp_mask, nr_pages);
1207        if (!bio)
1208                goto out_bmd;
1209
1210        ret = 0;
1211
1212        if (map_data) {
1213                nr_pages = 1 << map_data->page_order;
1214                i = map_data->offset / PAGE_SIZE;
1215        }
1216        while (len) {
1217                unsigned int bytes = PAGE_SIZE;
1218
1219                bytes -= offset;
1220
1221                if (bytes > len)
1222                        bytes = len;
1223
1224                if (map_data) {
1225                        if (i == map_data->nr_entries * nr_pages) {
1226                                ret = -ENOMEM;
1227                                break;
1228                        }
1229
1230                        page = map_data->pages[i / nr_pages];
1231                        page += (i % nr_pages);
1232
1233                        i++;
1234                } else {
1235                        page = alloc_page(q->bounce_gfp | gfp_mask);
1236                        if (!page) {
1237                                ret = -ENOMEM;
1238                                break;
1239                        }
1240                }
1241
1242                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1243                        break;
1244
1245                len -= bytes;
1246                offset = 0;
1247        }
1248
1249        if (ret)
1250                goto cleanup;
1251
1252        if (map_data)
1253                map_data->offset += bio->bi_iter.bi_size;
1254
1255        /*
1256         * success
1257         */
1258        if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1259            (map_data && map_data->from_user)) {
1260                ret = bio_copy_from_iter(bio, iter);
1261                if (ret)
1262                        goto cleanup;
1263        } else {
1264                if (bmd->is_our_pages)
1265                        zero_fill_bio(bio);
1266                iov_iter_advance(iter, bio->bi_iter.bi_size);
1267        }
1268
1269        bio->bi_private = bmd;
1270        if (map_data && map_data->null_mapped)
1271                bio_set_flag(bio, BIO_NULL_MAPPED);
1272        return bio;
1273cleanup:
1274        if (!map_data)
1275                bio_free_pages(bio);
1276        bio_put(bio);
1277out_bmd:
1278        kfree(bmd);
1279        return ERR_PTR(ret);
1280}
1281
1282/**
1283 *      bio_map_user_iov - map user iovec into bio
1284 *      @q:             the struct request_queue for the bio
1285 *      @iter:          iovec iterator
1286 *      @gfp_mask:      memory allocation flags
1287 *
1288 *      Map the user space address into a bio suitable for io to a block
1289 *      device. Returns an error pointer in case of error.
1290 */
1291struct bio *bio_map_user_iov(struct request_queue *q,
1292                             struct iov_iter *iter,
1293                             gfp_t gfp_mask)
1294{
1295        int j;
1296        struct bio *bio;
1297        int ret;
1298        struct bio_vec *bvec;
1299
1300        if (!iov_iter_count(iter))
1301                return ERR_PTR(-EINVAL);
1302
1303        bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1304        if (!bio)
1305                return ERR_PTR(-ENOMEM);
1306
1307        while (iov_iter_count(iter)) {
1308                struct page **pages;
1309                ssize_t bytes;
1310                size_t offs, added = 0;
1311                int npages;
1312
1313                bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1314                if (unlikely(bytes <= 0)) {
1315                        ret = bytes ? bytes : -EFAULT;
1316                        goto out_unmap;
1317                }
1318
1319                npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1320
1321                if (unlikely(offs & queue_dma_alignment(q))) {
1322                        ret = -EINVAL;
1323                        j = 0;
1324                } else {
1325                        for (j = 0; j < npages; j++) {
1326                                struct page *page = pages[j];
1327                                unsigned int n = PAGE_SIZE - offs;
1328                                unsigned short prev_bi_vcnt = bio->bi_vcnt;
1329
1330                                if (n > bytes)
1331                                        n = bytes;
1332
1333                                if (!bio_add_pc_page(q, bio, page, n, offs))
1334                                        break;
1335
1336                                /*
1337                                 * check if vector was merged with previous
1338                                 * drop page reference if needed
1339                                 */
1340                                if (bio->bi_vcnt == prev_bi_vcnt)
1341                                        put_page(page);
1342
1343                                added += n;
1344                                bytes -= n;
1345                                offs = 0;
1346                        }
1347                        iov_iter_advance(iter, added);
1348                }
1349                /*
1350                 * release the pages we didn't map into the bio, if any
1351                 */
1352                while (j < npages)
1353                        put_page(pages[j++]);
1354                kvfree(pages);
1355                /* couldn't stuff something into bio? */
1356                if (bytes)
1357                        break;
1358        }
1359
1360        bio_set_flag(bio, BIO_USER_MAPPED);
1361
1362        /*
1363         * subtle -- if bio_map_user_iov() ended up bouncing a bio,
1364         * it would normally disappear when its bi_end_io is run.
1365         * however, we need it for the unmap, so grab an extra
1366         * reference to it
1367         */
1368        bio_get(bio);
1369        return bio;
1370
1371 out_unmap:
1372        bio_for_each_segment_all(bvec, bio, j) {
1373                put_page(bvec->bv_page);
1374        }
1375        bio_put(bio);
1376        return ERR_PTR(ret);
1377}
1378
1379static void __bio_unmap_user(struct bio *bio)
1380{
1381        struct bio_vec *bvec;
1382        int i;
1383
1384        /*
1385         * make sure we dirty pages we wrote to
1386         */
1387        bio_for_each_segment_all(bvec, bio, i) {
1388                if (bio_data_dir(bio) == READ)
1389                        set_page_dirty_lock(bvec->bv_page);
1390
1391                put_page(bvec->bv_page);
1392        }
1393
1394        bio_put(bio);
1395}
1396
1397/**
1398 *      bio_unmap_user  -       unmap a bio
1399 *      @bio:           the bio being unmapped
1400 *
1401 *      Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
1402 *      process context.
1403 *
1404 *      bio_unmap_user() may sleep.
1405 */
1406void bio_unmap_user(struct bio *bio)
1407{
1408        __bio_unmap_user(bio);
1409        bio_put(bio);
1410}
1411
1412static void bio_map_kern_endio(struct bio *bio)
1413{
1414        bio_put(bio);
1415}
1416
1417/**
1418 *      bio_map_kern    -       map kernel address into bio
1419 *      @q: the struct request_queue for the bio
1420 *      @data: pointer to buffer to map
1421 *      @len: length in bytes
1422 *      @gfp_mask: allocation flags for bio allocation
1423 *
1424 *      Map the kernel address into a bio suitable for io to a block
1425 *      device. Returns an error pointer in case of error.
1426 */
1427struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1428                         gfp_t gfp_mask)
1429{
1430        unsigned long kaddr = (unsigned long)data;
1431        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1432        unsigned long start = kaddr >> PAGE_SHIFT;
1433        const int nr_pages = end - start;
1434        int offset, i;
1435        struct bio *bio;
1436
1437        bio = bio_kmalloc(gfp_mask, nr_pages);
1438        if (!bio)
1439                return ERR_PTR(-ENOMEM);
1440
1441        offset = offset_in_page(kaddr);
1442        for (i = 0; i < nr_pages; i++) {
1443                unsigned int bytes = PAGE_SIZE - offset;
1444
1445                if (len <= 0)
1446                        break;
1447
1448                if (bytes > len)
1449                        bytes = len;
1450
1451                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1452                                    offset) < bytes) {
1453                        /* we don't support partial mappings */
1454                        bio_put(bio);
1455                        return ERR_PTR(-EINVAL);
1456                }
1457
1458                data += bytes;
1459                len -= bytes;
1460                offset = 0;
1461        }
1462
1463        bio->bi_end_io = bio_map_kern_endio;
1464        return bio;
1465}
1466EXPORT_SYMBOL(bio_map_kern);
1467
1468static void bio_copy_kern_endio(struct bio *bio)
1469{
1470        bio_free_pages(bio);
1471        bio_put(bio);
1472}
1473
1474static void bio_copy_kern_endio_read(struct bio *bio)
1475{
1476        char *p = bio->bi_private;
1477        struct bio_vec *bvec;
1478        int i;
1479
1480        bio_for_each_segment_all(bvec, bio, i) {
1481                memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1482                p += bvec->bv_len;
1483        }
1484
1485        bio_copy_kern_endio(bio);
1486}
1487
1488/**
1489 *      bio_copy_kern   -       copy kernel address into bio
1490 *      @q: the struct request_queue for the bio
1491 *      @data: pointer to buffer to copy
1492 *      @len: length in bytes
1493 *      @gfp_mask: allocation flags for bio and page allocation
1494 *      @reading: data direction is READ
1495 *
1496 *      copy the kernel address into a bio suitable for io to a block
1497 *      device. Returns an error pointer in case of error.
1498 */
1499struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1500                          gfp_t gfp_mask, int reading)
1501{
1502        unsigned long kaddr = (unsigned long)data;
1503        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1504        unsigned long start = kaddr >> PAGE_SHIFT;
1505        struct bio *bio;
1506        void *p = data;
1507        int nr_pages = 0;
1508
1509        /*
1510         * Overflow, abort
1511         */
1512        if (end < start)
1513                return ERR_PTR(-EINVAL);
1514
1515        nr_pages = end - start;
1516        bio = bio_kmalloc(gfp_mask, nr_pages);
1517        if (!bio)
1518                return ERR_PTR(-ENOMEM);
1519
1520        while (len) {
1521                struct page *page;
1522                unsigned int bytes = PAGE_SIZE;
1523
1524                if (bytes > len)
1525                        bytes = len;
1526
1527                page = alloc_page(q->bounce_gfp | gfp_mask);
1528                if (!page)
1529                        goto cleanup;
1530
1531                if (!reading)
1532                        memcpy(page_address(page), p, bytes);
1533
1534                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1535                        break;
1536
1537                len -= bytes;
1538                p += bytes;
1539        }
1540
1541        if (reading) {
1542                bio->bi_end_io = bio_copy_kern_endio_read;
1543                bio->bi_private = data;
1544        } else {
1545                bio->bi_end_io = bio_copy_kern_endio;
1546        }
1547
1548        return bio;
1549
1550cleanup:
1551        bio_free_pages(bio);
1552        bio_put(bio);
1553        return ERR_PTR(-ENOMEM);
1554}
1555
1556/*
1557 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1558 * for performing direct-IO in BIOs.
1559 *
1560 * The problem is that we cannot run set_page_dirty() from interrupt context
1561 * because the required locks are not interrupt-safe.  So what we can do is to
1562 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1563 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1564 * in process context.
1565 *
1566 * We special-case compound pages here: normally this means reads into hugetlb
1567 * pages.  The logic in here doesn't really work right for compound pages
1568 * because the VM does not uniformly chase down the head page in all cases.
1569 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1570 * handle them at all.  So we skip compound pages here at an early stage.
1571 *
1572 * Note that this code is very hard to test under normal circumstances because
1573 * direct-io pins the pages with get_user_pages().  This makes
1574 * is_page_cache_freeable return false, and the VM will not clean the pages.
1575 * But other code (eg, flusher threads) could clean the pages if they are mapped
1576 * pagecache.
1577 *
1578 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1579 * deferred bio dirtying paths.
1580 */
1581
1582/*
1583 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1584 */
1585void bio_set_pages_dirty(struct bio *bio)
1586{
1587        struct bio_vec *bvec;
1588        int i;
1589
1590        bio_for_each_segment_all(bvec, bio, i) {
1591                if (!PageCompound(bvec->bv_page))
1592                        set_page_dirty_lock(bvec->bv_page);
1593        }
1594}
1595EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1596
1597static void bio_release_pages(struct bio *bio)
1598{
1599        struct bio_vec *bvec;
1600        int i;
1601
1602        bio_for_each_segment_all(bvec, bio, i)
1603                put_page(bvec->bv_page);
1604}
1605
1606/*
1607 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1608 * If they are, then fine.  If, however, some pages are clean then they must
1609 * have been written out during the direct-IO read.  So we take another ref on
1610 * the BIO and re-dirty the pages in process context.
1611 *
1612 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1613 * here on.  It will run one put_page() against each page and will run one
1614 * bio_put() against the BIO.
1615 */
1616
1617static void bio_dirty_fn(struct work_struct *work);
1618
1619static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1620static DEFINE_SPINLOCK(bio_dirty_lock);
1621static struct bio *bio_dirty_list;
1622
1623/*
1624 * This runs in process context
1625 */
1626static void bio_dirty_fn(struct work_struct *work)
1627{
1628        struct bio *bio, *next;
1629
1630        spin_lock_irq(&bio_dirty_lock);
1631        next = bio_dirty_list;
1632        bio_dirty_list = NULL;
1633        spin_unlock_irq(&bio_dirty_lock);
1634
1635        while ((bio = next) != NULL) {
1636                next = bio->bi_private;
1637
1638                bio_set_pages_dirty(bio);
1639                bio_release_pages(bio);
1640                bio_put(bio);
1641        }
1642}
1643
1644void bio_check_pages_dirty(struct bio *bio)
1645{
1646        struct bio_vec *bvec;
1647        unsigned long flags;
1648        int i;
1649
1650        bio_for_each_segment_all(bvec, bio, i) {
1651                if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1652                        goto defer;
1653        }
1654
1655        bio_release_pages(bio);
1656        bio_put(bio);
1657        return;
1658defer:
1659        spin_lock_irqsave(&bio_dirty_lock, flags);
1660        bio->bi_private = bio_dirty_list;
1661        bio_dirty_list = bio;
1662        spin_unlock_irqrestore(&bio_dirty_lock, flags);
1663        schedule_work(&bio_dirty_work);
1664}
1665EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
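
/*
 * Example (illustrative sketch only): a direct-IO read path would pair the
 * two helpers above roughly as follows.  dio_read_endio() and
 * dio_submit_read() are hypothetical names, not part of this file.
 */
static void dio_read_endio(struct bio *bio)
{
        /*
         * Interrupt context: releases every page reference and the bio
         * reference, re-dirtying (from process context) any page that was
         * cleaned while the read was in flight.
         */
        bio_check_pages_dirty(bio);
}

static void dio_submit_read(struct bio *bio)
{
        bio->bi_end_io = dio_read_endio;
        /* Dirty the pinned pages up front, while we may still sleep. */
        bio_set_pages_dirty(bio);
        submit_bio(bio);
}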
1666
1667void generic_start_io_acct(struct request_queue *q, int op,
1668                           unsigned long sectors, struct hd_struct *part)
1669{
1670        const int sgrp = op_stat_group(op);
1671        int cpu = part_stat_lock();
1672
1673        part_round_stats(q, cpu, part);
1674        part_stat_inc(cpu, part, ios[sgrp]);
1675        part_stat_add(cpu, part, sectors[sgrp], sectors);
1676        part_inc_in_flight(q, part, op_is_write(op));
1677
1678        part_stat_unlock();
1679}
1680EXPORT_SYMBOL(generic_start_io_acct);
1681
1682void generic_end_io_acct(struct request_queue *q, int req_op,
1683                         struct hd_struct *part, unsigned long start_time)
1684{
1685        unsigned long duration = jiffies - start_time;
1686        const int sgrp = op_stat_group(req_op);
1687        int cpu = part_stat_lock();
1688
1689        part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
1690        part_round_stats(q, cpu, part);
1691        part_dec_in_flight(q, part, op_is_write(req_op));
1692
1693        part_stat_unlock();
1694}
1695EXPORT_SYMBOL(generic_end_io_acct);
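
/*
 * Example (illustrative sketch only): a bio-based driver typically brackets
 * each bio it services with the two accounting helpers above.  The helper
 * name my_account_bio() and the synchronous handling are hypothetical.
 */
static void my_account_bio(struct gendisk *disk, struct bio *bio)
{
        unsigned long start_time = jiffies;

        generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
                              &disk->part0);
        /* ... service the bio ... */
        generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0,
                            start_time);
}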
1696
1697#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1698void bio_flush_dcache_pages(struct bio *bi)
1699{
1700        struct bio_vec bvec;
1701        struct bvec_iter iter;
1702
1703        bio_for_each_segment(bvec, bi, iter)
1704                flush_dcache_page(bvec.bv_page);
1705}
1706EXPORT_SYMBOL(bio_flush_dcache_pages);
1707#endif
1708
1709static inline bool bio_remaining_done(struct bio *bio)
1710{
1711        /*
1712         * If we're not chaining, then ->__bi_remaining is always 1 and
1713         * we always end io on the first invocation.
1714         */
1715        if (!bio_flagged(bio, BIO_CHAIN))
1716                return true;
1717
1718        BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1719
1720        if (atomic_dec_and_test(&bio->__bi_remaining)) {
1721                bio_clear_flag(bio, BIO_CHAIN);
1722                return true;
1723        }
1724
1725        return false;
1726}
1727
1728/**
1729 * bio_endio - end I/O on a bio
1730 * @bio:        bio
1731 *
1732 * Description:
1733 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1734 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1735 *   bio unless they own it and thus know that it has an end_io function.
1736 *
1737 *   bio_endio() can be called several times on a bio that has been chained
1738 *   using bio_chain().  The ->bi_end_io() function will only be called the
1739 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1740 *   generated if BIO_TRACE_COMPLETION is set.
1741 **/
1742void bio_endio(struct bio *bio)
1743{
1744again:
1745        if (!bio_remaining_done(bio))
1746                return;
1747        if (!bio_integrity_endio(bio))
1748                return;
1749
1750        if (bio->bi_disk)
1751                rq_qos_done_bio(bio->bi_disk->queue, bio);
1752
1753        /*
1754         * Need to have a real endio function for chained bios, otherwise
1755         * various corner cases will break (like stacking block devices that
1756         * save/restore bi_end_io) - however, we want to avoid unbounded
1757         * recursion and blowing the stack. Tail call optimization would
1758         * handle this, but compiling with frame pointers also disables
1759         * gcc's sibling call optimization.
1760         */
1761        if (bio->bi_end_io == bio_chain_endio) {
1762                bio = __bio_chain_endio(bio);
1763                goto again;
1764        }
1765
1766        if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1767                trace_block_bio_complete(bio->bi_disk->queue, bio,
1768                                         blk_status_to_errno(bio->bi_status));
1769                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1770        }
1771
1772        blk_throtl_bio_endio(bio);
1773        /* release cgroup info */
1774        bio_uninit(bio);
1775        if (bio->bi_end_io)
1776                bio->bi_end_io(bio);
1777}
1778EXPORT_SYMBOL(bio_endio);
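
/*
 * Example (illustrative sketch only): chaining as described above.  With
 * bio_chain(child, parent), the child's completion only drops the parent's
 * __bi_remaining; the parent's ->bi_end_io runs once both bios have ended.
 * my_issue_pair() is a hypothetical caller and error handling is omitted.
 */
static void my_issue_pair(struct bio *parent, struct bio *child)
{
        bio_chain(child, parent);
        submit_bio(child);
        submit_bio(parent);
}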
1779
1780/**
1781 * bio_split - split a bio
1782 * @bio:        bio to split
1783 * @sectors:    number of sectors to split from the front of @bio
1784 * @gfp:        gfp mask
1785 * @bs:         bio set to allocate from
1786 *
1787 * Allocates and returns a new bio which represents @sectors from the start of
1788 * @bio, and updates @bio to represent the remaining sectors.
1789 *
1790 * Unless this is a discard request the newly allocated bio will point
1791 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1792 * @bio is not freed before the split.
1793 */
1794struct bio *bio_split(struct bio *bio, int sectors,
1795                      gfp_t gfp, struct bio_set *bs)
1796{
1797        struct bio *split;
1798
1799        BUG_ON(sectors <= 0);
1800        BUG_ON(sectors >= bio_sectors(bio));
1801
1802        split = bio_clone_fast(bio, gfp, bs);
1803        if (!split)
1804                return NULL;
1805
1806        split->bi_iter.bi_size = sectors << 9;
1807
1808        if (bio_integrity(split))
1809                bio_integrity_trim(split);
1810
1811        bio_advance(bio, split->bi_iter.bi_size);
1812
1813        if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1814                bio_set_flag(split, BIO_TRACE_COMPLETION);
1815
1816        return split;
1817}
1818EXPORT_SYMBOL(bio_split);
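
/*
 * Example (illustrative sketch only): splitting an oversized bio at a limit
 * and requeueing the remainder, modelled on the block layer's own splitting
 * path.  max_sectors is a hypothetical limit, and the use of fs_bio_set is
 * only for illustration; real callers normally use their own bio_set.
 */
static struct bio *my_split_at_limit(struct bio *bio, int max_sectors)
{
        struct bio *split;

        if (bio_sectors(bio) <= max_sectors)
                return bio;

        /* The new bio covers the front; @bio is advanced to the remainder. */
        split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
        bio_chain(split, bio);
        generic_make_request(bio);      /* resubmit the remainder */
        return split;
}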
1819
1820/**
1821 * bio_trim - trim a bio
1822 * @bio:        bio to trim
1823 * @offset:     number of sectors to trim from the front of @bio
1824 * @size:       size we want to trim @bio to, in sectors
1825 */
1826void bio_trim(struct bio *bio, int offset, int size)
1827{
1828        /* 'bio' is a cloned bio which we need to trim to match
1829         * the given offset and size.
1830         */
1831
1832        size <<= 9;
1833        if (offset == 0 && size == bio->bi_iter.bi_size)
1834                return;
1835
1836        bio_clear_flag(bio, BIO_SEG_VALID);
1837
1838        bio_advance(bio, offset << 9);
1839
1840        bio->bi_iter.bi_size = size;
1841
1842        if (bio_integrity(bio))
1843                bio_integrity_trim(bio);
1844
1845}
1846EXPORT_SYMBOL_GPL(bio_trim);
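
/*
 * Example (illustrative sketch only): cloning a bio and trimming the clone
 * down to a sub-range of the parent, e.g. the part that targets a single
 * stripe.  The bio_set and the offset/size arguments are hypothetical.
 */
static struct bio *my_clone_range(struct bio *bio, struct bio_set *bs,
                                  int offset, int nr_sectors)
{
        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

        if (!clone)
                return NULL;
        /* Drop @offset sectors from the front, keep @nr_sectors sectors. */
        bio_trim(clone, offset, nr_sectors);
        return clone;
}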
1847
1848/*
1849 * Create the memory pool for biovecs in a bio_set, using the largest of the
1850 * global biovec slabs created for general use.
1851 */
1852int biovec_init_pool(mempool_t *pool, int pool_entries)
1853{
1854        struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1855
1856        return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1857}
1858
1859/*
1860 * bioset_exit - exit a bioset initialized with bioset_init()
1861 *
1862 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1863 * kzalloc()).
1864 */
1865void bioset_exit(struct bio_set *bs)
1866{
1867        if (bs->rescue_workqueue)
1868                destroy_workqueue(bs->rescue_workqueue);
1869        bs->rescue_workqueue = NULL;
1870
1871        mempool_exit(&bs->bio_pool);
1872        mempool_exit(&bs->bvec_pool);
1873
1874        bioset_integrity_free(bs);
1875        if (bs->bio_slab)
1876                bio_put_slab(bs);
1877        bs->bio_slab = NULL;
1878}
1879EXPORT_SYMBOL(bioset_exit);
1880
1881/**
1882 * bioset_init - Initialize a bio_set
1883 * @bs:         pool to initialize
1884 * @pool_size:  Number of bio and bio_vecs to cache in the mempool
1885 * @front_pad:  Number of bytes to allocate in front of the returned bio
1886 * @flags:      Flags to modify behavior, currently %BIOSET_NEED_BVECS
1887 *              and %BIOSET_NEED_RESCUER
1888 *
1889 * Description:
1890 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1891 *    to ask for a number of bytes to be allocated in front of the bio.
1892 *    Front pad allocation is useful for embedding the bio inside
1893 *    another structure, to avoid allocating extra data to go with the bio.
1894 *    Note that the bio must always be embedded at the END of that structure,
1895 *    or things will break badly.
1896 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1897 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1898 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1899 *    dispatch queued requests when the mempool runs out of space.
1900 *
1901 */
1902int bioset_init(struct bio_set *bs,
1903                unsigned int pool_size,
1904                unsigned int front_pad,
1905                int flags)
1906{
1907        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1908
1909        bs->front_pad = front_pad;
1910
1911        spin_lock_init(&bs->rescue_lock);
1912        bio_list_init(&bs->rescue_list);
1913        INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1914
1915        bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1916        if (!bs->bio_slab)
1917                return -ENOMEM;
1918
1919        if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1920                goto bad;
1921
1922        if ((flags & BIOSET_NEED_BVECS) &&
1923            biovec_init_pool(&bs->bvec_pool, pool_size))
1924                goto bad;
1925
1926        if (!(flags & BIOSET_NEED_RESCUER))
1927                return 0;
1928
1929        bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1930        if (!bs->rescue_workqueue)
1931                goto bad;
1932
1933        return 0;
1934bad:
1935        bioset_exit(bs);
1936        return -ENOMEM;
1937}
1938EXPORT_SYMBOL(bioset_init);
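
/*
 * Example (illustrative sketch only): a driver embedding the bio at the end
 * of its own per-I/O structure and sizing front_pad so that one mempool
 * allocation covers both, as described above.  struct my_io, my_bio_set and
 * the init/alloc/exit helpers are hypothetical.
 */
struct my_io {
        void                    *private;
        struct work_struct      work;
        struct bio              bio;            /* must be last */
};

static struct bio_set my_bio_set;

static int __init my_driver_init(void)
{
        return bioset_init(&my_bio_set, BIO_POOL_SIZE,
                           offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static struct my_io *my_alloc_io(unsigned int nr_vecs)
{
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);

        return bio ? container_of(bio, struct my_io, bio) : NULL;
}

static void __exit my_driver_exit(void)
{
        bioset_exit(&my_bio_set);
}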
1939
1940/*
1941 * Initialize and setup a new bio_set, based on the settings from
1942 * another bio_set.
1943 */
1944int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1945{
1946        int flags;
1947
1948        flags = 0;
1949        if (src->bvec_pool.min_nr)
1950                flags |= BIOSET_NEED_BVECS;
1951        if (src->rescue_workqueue)
1952                flags |= BIOSET_NEED_RESCUER;
1953
1954        return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1955}
1956EXPORT_SYMBOL(bioset_init_from_src);
1957
1958#ifdef CONFIG_BLK_CGROUP
1959
1960#ifdef CONFIG_MEMCG
1961/**
1962 * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
1963 * @bio: target bio
1964 * @page: the page to lookup the blkcg from
1965 *
1966 * Associate @bio with the blkcg from @page's owning memcg.  This works like
1967 * every other associate function wrt references.
1968 */
1969int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
1970{
1971        struct cgroup_subsys_state *blkcg_css;
1972
1973        if (unlikely(bio->bi_css))
1974                return -EBUSY;
1975        if (!page->mem_cgroup)
1976                return 0;
1977        blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
1978                                     &io_cgrp_subsys);
1979        bio->bi_css = blkcg_css;
1980        return 0;
1981}
1982#endif /* CONFIG_MEMCG */
1983
1984/**
1985 * bio_associate_blkcg - associate a bio with the specified blkcg
1986 * @bio: target bio
1987 * @blkcg_css: css of the blkcg to associate
1988 *
1989 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
1990 * treat @bio as if it were issued by a task which belongs to the blkcg.
1991 *
1992 * This function takes an extra reference of @blkcg_css which will be put
1993 * when @bio is released.  The caller must own @bio and is responsible for
1994 * synchronizing calls to this function.
1995 */
1996int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1997{
1998        if (unlikely(bio->bi_css))
1999                return -EBUSY;
2000        css_get(blkcg_css);
2001        bio->bi_css = blkcg_css;
2002        return 0;
2003}
2004EXPORT_SYMBOL_GPL(bio_associate_blkcg);
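
/*
 * Example (illustrative sketch only): tagging a bio submitted on behalf of
 * another cgroup (e.g. writeback done by a kernel thread) so that the I/O
 * controllers charge the owning group.  The source of @blkcg_css and the
 * helper name are hypothetical.
 */
static void my_tag_foreign_bio(struct bio *bio,
                               struct cgroup_subsys_state *blkcg_css)
{
        /* Takes a css reference; fails with -EBUSY if already associated. */
        WARN_ON_ONCE(bio_associate_blkcg(bio, blkcg_css));
}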
2005
2006/**
2007 * bio_associate_blkg - associate a bio with the specified blkg
2008 * @bio: target bio
2009 * @blkg: the blkg to associate
2010 *
2011 * Associate @bio with the blkg specified by @blkg.  This is the queue-specific
2012 * blkcg information associated with the @bio.  A reference is taken on the
2013 * @blkg and is released when the bio is freed.
2014 */
2015int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2016{
2017        if (unlikely(bio->bi_blkg))
2018                return -EBUSY;
2019        if (!blkg_try_get(blkg))
2020                return -ENODEV;
2021        bio->bi_blkg = blkg;
2022        return 0;
2023}
2024
2025/**
2026 * bio_disassociate_task - undo bio_associate_current()
2027 * @bio: target bio
2028 */
2029void bio_disassociate_task(struct bio *bio)
2030{
2031        if (bio->bi_ioc) {
2032                put_io_context(bio->bi_ioc);
2033                bio->bi_ioc = NULL;
2034        }
2035        if (bio->bi_css) {
2036                css_put(bio->bi_css);
2037                bio->bi_css = NULL;
2038        }
2039        if (bio->bi_blkg) {
2040                blkg_put(bio->bi_blkg);
2041                bio->bi_blkg = NULL;
2042        }
2043}
2044
2045/**
2046 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
2047 * @dst: destination bio
2048 * @src: source bio
2049 */
2050void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
2051{
2052        if (src->bi_css)
2053                WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2054}
2055EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
2056#endif /* CONFIG_BLK_CGROUP */
2057
2058static void __init biovec_init_slabs(void)
2059{
2060        int i;
2061
2062        for (i = 0; i < BVEC_POOL_NR; i++) {
2063                int size;
2064                struct biovec_slab *bvs = bvec_slabs + i;
2065
2066                if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2067                        bvs->slab = NULL;
2068                        continue;
2069                }
2070
2071                size = bvs->nr_vecs * sizeof(struct bio_vec);
2072                bvs->slab = kmem_cache_create(bvs->name, size, 0,
2073                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2074        }
2075}
2076
2077static int __init init_bio(void)
2078{
2079        bio_slab_max = 2;
2080        bio_slab_nr = 0;
2081        bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
2082                            GFP_KERNEL);
2083        if (!bio_slabs)
2084                panic("bio: can't allocate bios\n");
2085
2086        bio_integrity_init();
2087        biovec_init_slabs();
2088
2089        if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2090                panic("bio: can't allocate bios\n");
2091
2092        if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2093                panic("bio: can't create integrity pool\n");
2094
2095        return 0;
2096}
2097subsys_initcall(init_bio);
2098