linux/block/bio.c
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>
#include "blk.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS         4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
        char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
        struct bio_slab *bslab, *new_bio_slabs;
        unsigned int new_bio_slab_max;
        unsigned int i, entry = -1;

        mutex_lock(&bio_slab_lock);

        i = 0;
        while (i < bio_slab_nr) {
                bslab = &bio_slabs[i];

                if (!bslab->slab && entry == -1)
                        entry = i;
                else if (bslab->slab_size == sz) {
                        slab = bslab->slab;
                        bslab->slab_ref++;
                        break;
                }
                i++;
        }

        if (slab)
                goto out_unlock;

        if (bio_slab_nr == bio_slab_max && entry == -1) {
                new_bio_slab_max = bio_slab_max << 1;
                new_bio_slabs = krealloc(bio_slabs,
                                         new_bio_slab_max * sizeof(struct bio_slab),
                                         GFP_KERNEL);
                if (!new_bio_slabs)
                        goto out_unlock;
                bio_slab_max = new_bio_slab_max;
                bio_slabs = new_bio_slabs;
        }
        if (entry == -1)
                entry = bio_slab_nr++;

        bslab = &bio_slabs[entry];

        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
        slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!slab)
                goto out_unlock;

        bslab->slab = slab;
        bslab->slab_ref = 1;
        bslab->slab_size = sz;
out_unlock:
        mutex_unlock(&bio_slab_lock);
        return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
        struct bio_slab *bslab = NULL;
        unsigned int i;

        mutex_lock(&bio_slab_lock);

        for (i = 0; i < bio_slab_nr; i++) {
                if (bs->bio_slab == bio_slabs[i].slab) {
                        bslab = &bio_slabs[i];
                        break;
                }
        }

        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
                goto out;

        WARN_ON(!bslab->slab_ref);

        if (--bslab->slab_ref)
                goto out;

        kmem_cache_destroy(bslab->slab);
        bslab->slab = NULL;

out:
        mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
        if (!idx)
                return;
        idx--;

        BIO_BUG_ON(idx >= BVEC_POOL_NR);

        if (idx == BVEC_POOL_MAX) {
                mempool_free(bv, pool);
        } else {
                struct biovec_slab *bvs = bvec_slabs + idx;

                kmem_cache_free(bvs->slab, bv);
        }
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
                           mempool_t *pool)
{
        struct bio_vec *bvl;

        /*
         * see comment near bvec_array define!
         */
        switch (nr) {
        case 1:
                *idx = 0;
                break;
        case 2 ... 4:
                *idx = 1;
                break;
        case 5 ... 16:
                *idx = 2;
                break;
        case 17 ... 64:
                *idx = 3;
                break;
        case 65 ... 128:
                *idx = 4;
                break;
        case 129 ... BIO_MAX_PAGES:
                *idx = 5;
                break;
        default:
                return NULL;
        }

        /*
         * idx now points to the pool we want to allocate from. only the
         * 1-vec entry pool is mempool backed.
         */
        if (*idx == BVEC_POOL_MAX) {
fallback:
                bvl = mempool_alloc(pool, gfp_mask);
        } else {
                struct biovec_slab *bvs = bvec_slabs + *idx;
                gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

                /*
                 * Make this allocation restricted and don't dump info on
                 * allocation failures, since we'll fallback to the mempool
                 * in case of failure.
                 */
                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

                /*
                 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
                 * is set, retry with the 1-entry mempool
                 */
                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
                if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
                        *idx = BVEC_POOL_MAX;
                        goto fallback;
                }
        }

        (*idx)++;
        return bvl;
}

void bio_uninit(struct bio *bio)
{
        bio_disassociate_task(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
        struct bio_set *bs = bio->bi_pool;
        void *p;

        bio_uninit(bio);

        if (bs) {
                bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

                /*
                 * If we have front padding, adjust the bio pointer before freeing
                 */
                p = bio;
                p -= bs->front_pad;

                mempool_free(p, bs->bio_pool);
        } else {
                /* Bio was allocated by bio_kmalloc() */
                kfree(bio);
        }
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
              unsigned short max_vecs)
{
        memset(bio, 0, sizeof(*bio));
        atomic_set(&bio->__bi_remaining, 1);
        atomic_set(&bio->__bi_cnt, 1);

        bio->bi_io_vec = table;
        bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
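
/*
 * Illustrative sketch (not part of the original source): a driver that
 * embeds a bio inside its own request structure pairs bio_init() with
 * bio_uninit() as the comment above requires. All names below are
 * hypothetical.
 */
#if 0
struct my_request {
        struct bio bio;
        struct bio_vec inline_vec;
};

static void my_request_setup(struct my_request *req, struct page *page)
{
        /* one caller-owned inline bvec, no mempool involved */
        bio_init(&req->bio, &req->inline_vec, 1);
        bio_add_page(&req->bio, page, PAGE_SIZE, 0);
}

static void my_request_teardown(struct my_request *req)
{
        bio_uninit(&req->bio);  /* must pair with the bio_init() above */
}
#endif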

/**
 * bio_reset - reinitialize a bio
 * @bio:        bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

        bio_uninit(bio);

        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags;
        atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
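
/*
 * Illustrative sketch (hypothetical helper): reusing one bio for several
 * synchronous writes by resetting it between submissions instead of
 * reallocating. bi_io_vec and bi_max_vecs survive the reset, so the bio
 * can simply be refilled with bio_add_page().
 */
#if 0
static int my_rewrite_sector(struct bio *bio, struct block_device *bdev,
                             sector_t sector, struct page *page)
{
        bio_reset(bio);                 /* back to freshly-allocated state */
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        return submit_bio_wait(bio);
}
#endif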

static struct bio *__bio_chain_endio(struct bio *bio)
{
        struct bio *parent = bio->bi_private;

        if (!parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
}

static void bio_chain_endio(struct bio *bio)
{
        bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
        BUG_ON(bio->bi_private || bio->bi_end_io);

        bio->bi_private = parent;
        bio->bi_end_io  = bio_chain_endio;
        bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
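
/*
 * Illustrative sketch (hypothetical names): the usual split-and-chain
 * pattern. The remainder @bio becomes the parent, so its completion is
 * deferred until the split-off front half also finishes. Assumes a
 * private bio_set "my_bio_set" created elsewhere.
 */
#if 0
static void my_split_and_submit(struct bio *bio, int front_sectors)
{
        struct bio *split;

        split = bio_split(bio, front_sectors, GFP_NOIO, my_bio_set);
        bio_chain(split, bio);          /* @bio completes after both halves */
        generic_make_request(split);
        generic_make_request(bio);      /* remainder */
}
#endif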

static void bio_alloc_rescue(struct work_struct *work)
{
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
        struct bio *bio;

        while (1) {
                spin_lock(&bs->rescue_lock);
                bio = bio_list_pop(&bs->rescue_list);
                spin_unlock(&bs->rescue_lock);

                if (!bio)
                        break;

                generic_make_request(bio);
        }
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
        struct bio_list punt, nopunt;
        struct bio *bio;

        if (WARN_ON_ONCE(!bs->rescue_workqueue))
                return;
        /*
         * In order to guarantee forward progress we must punt only bios that
         * were allocated from this bio_set; otherwise, if there was a bio on
         * there for a stacking driver higher up in the stack, processing it
         * could require allocating bios from this bio_set, and doing that from
         * our own rescuer would be bad.
         *
         * Since bio lists are singly linked, pop them all instead of trying to
         * remove from the middle of the list:
         */

        bio_list_init(&punt);
        bio_list_init(&nopunt);

        while ((bio = bio_list_pop(&current->bio_list[0])))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
        current->bio_list[0] = nopunt;

        bio_list_init(&nopunt);
        while ((bio = bio_list_pop(&current->bio_list[1])))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
        current->bio_list[1] = nopunt;

        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
        spin_unlock(&bs->rescue_lock);

        queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:  number of iovecs to pre-allocate
 * @bs:         the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
                             struct bio_set *bs)
{
        gfp_t saved_gfp = gfp_mask;
        unsigned front_pad;
        unsigned inline_vecs;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        if (!bs) {
                if (nr_iovecs > UIO_MAXIOV)
                        return NULL;

                p = kmalloc(sizeof(struct bio) +
                            nr_iovecs * sizeof(struct bio_vec),
                            gfp_mask);
                front_pad = 0;
                inline_vecs = nr_iovecs;
        } else {
                /* should not use nobvec bioset for nr_iovecs > 0 */
                if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
                        return NULL;
                /*
                 * generic_make_request() converts recursion to iteration; this
                 * means if we're running beneath it, any bios we allocate and
                 * submit will not be submitted (and thus freed) until after we
                 * return.
                 *
                 * This exposes us to a potential deadlock if we allocate
                 * multiple bios from the same bio_set() while running
                 * underneath generic_make_request(). If we were to allocate
                 * multiple bios (say a stacking block driver that was splitting
                 * bios), we would deadlock if we exhausted the mempool's
                 * reserve.
                 *
                 * We solve this, and guarantee forward progress, with a rescuer
                 * workqueue per bio_set. If we go to allocate and there are
                 * bios on current->bio_list, we first try the allocation
                 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
                 * bios we would be blocking to the rescuer workqueue before
                 * we retry with the original gfp_flags.
                 */

                if (current->bio_list &&
                    (!bio_list_empty(&current->bio_list[0]) ||
                     !bio_list_empty(&current->bio_list[1])) &&
                    bs->rescue_workqueue)
                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;

                p = mempool_alloc(bs->bio_pool, gfp_mask);
                if (!p && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        p = mempool_alloc(bs->bio_pool, gfp_mask);
                }

                front_pad = bs->front_pad;
                inline_vecs = BIO_INLINE_VECS;
        }

        if (unlikely(!p))
                return NULL;

        bio = p + front_pad;
        bio_init(bio, NULL, 0);

        if (nr_iovecs > inline_vecs) {
                unsigned long idx = 0;

                bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                if (!bvl && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                }

                if (unlikely(!bvl))
                        goto err_free;

                bio->bi_flags |= idx << BVEC_POOL_OFFSET;
        } else if (nr_iovecs) {
                bvl = bio->bi_inline_vecs;
        }

        bio->bi_pool = bs;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
        return bio;

err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
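
/*
 * Illustrative sketch (hypothetical names): a stacking driver creates a
 * private, rescuer-backed bio_set at init time and then allocates one bio
 * at a time from it, relying on the mempool guarantee described above.
 * Assumes the bioset_create() flags of kernels of this vintage.
 */
#if 0
static struct bio_set *my_bio_set;

static int __init my_driver_init(void)
{
        my_bio_set = bioset_create(BIO_POOL_SIZE, 0,
                                   BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
        if (!my_bio_set)
                return -ENOMEM;
        return 0;
}

static struct bio *my_alloc_bio(unsigned int nr_vecs)
{
        /* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot fail */
        return bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);
}
#endif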

void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                char *data = bvec_kmap_irq(&bv, &flags);
                memset(data, 0, bv.bv_len);
                flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        if (!bio_flagged(bio, BIO_REFFED))
                bio_free(bio);
        else {
                BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

                /*
                 * last put frees it
                 */
                if (atomic_dec_and_test(&bio->__bi_cnt))
                        bio_free(bio);
        }
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 *      __bio_clone_fast - clone a bio that shares the original bio's biovec
 *      @bio: destination bio
 *      @bio_src: bio to clone
 *
 *      Clone a &bio. Caller will own the returned bio, but not
 *      the actual data it points to. Reference count of returned
 *      bio will be one.
 *
 *      Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
        BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

        /*
         * most users will be overriding ->bi_disk with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_disk = bio_src->bi_disk;
        bio_set_flag(bio, BIO_CLONED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;

        bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *      bio_clone_fast - clone a bio that shares the original bio's biovec
 *      @bio: bio to clone
 *      @gfp_mask: allocation priority
 *      @bs: bio_set to allocate from
 *
 *      Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
        struct bio *b;

        b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;

        __bio_clone_fast(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, gfp_mask);

                if (ret < 0) {
                        bio_put(b);
                        return NULL;
                }
        }

        return b;
}
EXPORT_SYMBOL(bio_clone_fast);
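
/*
 * Illustrative sketch (hypothetical names): a remapping driver clones an
 * incoming bio with bio_clone_fast(), points the clone at a lower device,
 * and completes the original from the clone's end_io. "my_bio_set" is the
 * private bio_set assumed above.
 */
#if 0
static void my_clone_endio(struct bio *clone)
{
        struct bio *orig = clone->bi_private;

        orig->bi_status = clone->bi_status;
        bio_put(clone);
        bio_endio(orig);
}

static void my_remap_and_submit(struct bio *bio, struct block_device *lower)
{
        struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);

        bio_set_dev(clone, lower);
        clone->bi_private = bio;
        clone->bi_end_io = my_clone_endio;
        generic_make_request(clone);
}
#endif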

/**
 *      bio_clone_bioset - clone a bio
 *      @bio_src: bio to clone
 *      @gfp_mask: allocation priority
 *      @bs: bio_set to allocate from
 *
 *      Clone bio. Caller will own the returned bio, but not the actual data it
 *      points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
                             struct bio_set *bs)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        /*
         * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
         * bio_src->bi_io_vec to bio->bi_io_vec.
         *
         * We can't do that anymore, because:
         *
         *  - The point of cloning the biovec is to produce a bio with a biovec
         *    the caller can modify: bi_idx and bi_bvec_done should be 0.
         *
         *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
         *    we tried to clone the whole thing bio_alloc_bioset() would fail.
         *    But the clone should succeed as long as the number of biovecs we
         *    actually need to allocate is fewer than BIO_MAX_PAGES.
         *
         *  - Lastly, bi_vcnt should not be looked at or relied upon by code
         *    that does not own the bio - reason being drivers don't use it for
         *    iterating over the biovec anymore, so expecting it to be kept up
         *    to date (i.e. for clones that share the parent biovec) is just
         *    asking for trouble and would force extra work on
         *    __bio_clone_fast() anyways.
         */

        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
        if (!bio)
                return NULL;
        bio->bi_disk            = bio_src->bi_disk;
        bio->bi_opf             = bio_src->bi_opf;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                break;
        case REQ_OP_WRITE_SAME:
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
                break;
        default:
                bio_for_each_segment(bv, bio_src, iter)
                        bio->bi_io_vec[bio->bi_vcnt++] = bv;
                break;
        }

        if (bio_integrity(bio_src)) {
                int ret;

                ret = bio_integrity_clone(bio, bio_src, gfp_mask);
                if (ret < 0) {
                        bio_put(bio);
                        return NULL;
                }
        }

        bio_clone_blkcg_association(bio, bio_src);

        return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 *      bio_add_pc_page -       attempt to add page to bio
 *      @q: the target queue
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This can fail for a
 *      number of reasons, such as the bio being full or target block device
 *      limitations. The target block device must allow bio's up to PAGE_SIZE,
 *      so it is always possible to add a single page to an empty bio.
 *
 *      This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
                    *page, unsigned int len, unsigned int offset)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        goto done;
                }

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvec_gap_to_prev(q, prev, offset))
                        return 0;
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
        bio->bi_iter.bi_size += len;

        /*
         * Perform a recount if the number of segments is greater
         * than queue_max_segments(q).
         */

        while (bio->bi_phys_segments > queue_max_segments(q)) {

                if (retried_segments)
                        goto failed;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio_clear_flag(bio, BIO_SEG_VALID);

 done:
        return len;

 failed:
        bvec->bv_page = NULL;
        bvec->bv_len = 0;
        bvec->bv_offset = 0;
        bio->bi_vcnt--;
        bio->bi_iter.bi_size -= len;
        blk_recount_segments(q, bio);
        return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 *      bio_add_page    -       attempt to add page to bio
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This will only fail
 *      if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
                 unsigned int len, unsigned int offset)
{
        struct bio_vec *bv;

        /*
         * cloned bio must not modify vec list
         */
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == bv->bv_page &&
                    offset == bv->bv_offset + bv->bv_len) {
                        bv->bv_len += len;
                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        bv              = &bio->bi_io_vec[bio->bi_vcnt];
        bv->bv_page     = page;
        bv->bv_len      = len;
        bv->bv_offset   = offset;

        bio->bi_vcnt++;
done:
        bio->bi_iter.bi_size += len;
        return len;
}
EXPORT_SYMBOL(bio_add_page);
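
/*
 * Illustrative sketch (hypothetical helper): filling a bio page by page.
 * bio_add_page() returns the number of bytes added, so anything short of
 * PAGE_SIZE here means the bio is full and should be submitted before a
 * new one is started.
 */
#if 0
static unsigned int my_fill_bio(struct bio *bio, struct page **pages,
                                unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < nr_pages; i++)
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                        break;  /* bio full: caller submits and retries */
        return i;               /* number of pages actually added */
}
#endif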

/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
        unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
        size_t offset, diff;
        ssize_t size;

        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
        if (unlikely(size <= 0))
                return size ? size : -EFAULT;
        nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

        /*
         * Deep magic below:  We need to walk the pinned pages backwards
         * because we are abusing the space allocated for the bio_vecs
         * for the page array.  Because the bio_vecs are larger than the
         * page pointers by definition this will always work.  But it also
         * means we can't use bio_add_page, so any changes to its semantics
         * need to be reflected here as well.
         */
        bio->bi_iter.bi_size += size;
        bio->bi_vcnt += nr_pages;

        diff = (nr_pages * PAGE_SIZE - offset) - size;
        while (nr_pages--) {
                bv[nr_pages].bv_page = pages[nr_pages];
                bv[nr_pages].bv_len = PAGE_SIZE;
                bv[nr_pages].bv_offset = 0;
        }

        bv[0].bv_offset += offset;
        bv[0].bv_len -= offset;
        if (diff)
                bv[bio->bi_vcnt - 1].bv_len -= diff;

        iov_iter_advance(iter, size);
        return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

struct submit_bio_ret {
        struct completion event;
        int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
        struct submit_bio_ret *ret = bio->bi_private;

        ret->error = blk_status_to_errno(bio->bi_status);
        complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on their
 * own.
 */
int submit_bio_wait(struct bio *bio)
{
        struct submit_bio_ret ret;

        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
        bio->bi_opf |= REQ_SYNC;
        submit_bio(bio);
        wait_for_completion_io(&ret.event);

        return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
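
/*
 * Illustrative sketch (hypothetical helper): a synchronous single-page
 * read. Note the bio reference is not consumed by submit_bio_wait(), so
 * the caller drops it with bio_put() afterwards.
 */
#if 0
static int my_read_page_sync(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret;

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);
        bio_put(bio);           /* reference survives submit_bio_wait() */
        return ret;
}
#endif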

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:        bio to advance
 * @bytes:      number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);

        bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
 * freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
        int i;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, i) {
                bv->bv_page = alloc_page(gfp_mask);
                if (!bv->bv_page) {
                        while (--bv >= bio->bi_io_vec)
                                __free_page(bv->bv_page);
                        return -ENOMEM;
                }
        }

        return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
        struct bvec_iter src_iter, dst_iter;
        struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
        unsigned bytes;

        src_iter = src->bi_iter;
        dst_iter = dst->bi_iter;

        while (1) {
                if (!src_iter.bi_size) {
                        src = src->bi_next;
                        if (!src)
                                break;

                        src_iter = src->bi_iter;
                }

                if (!dst_iter.bi_size) {
                        dst = dst->bi_next;
                        if (!dst)
                                break;

                        dst_iter = dst->bi_iter;
                }

                src_bv = bio_iter_iovec(src, src_iter);
                dst_bv = bio_iter_iovec(dst, dst_iter);

                bytes = min(src_bv.bv_len, dst_bv.bv_len);

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       bytes);

                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);

                bio_advance_iter(src, &src_iter, bytes);
                bio_advance_iter(dst, &dst_iter, bytes);
        }
}
EXPORT_SYMBOL(bio_copy_data);
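
/*
 * Illustrative sketch (hypothetical completion handler): the classic
 * bounce-buffer use of bio_copy_data() - on read completion, copy the
 * bounced data back into the original bio before ending it.
 */
#if 0
static void my_bounce_endio(struct bio *bounce)
{
        struct bio *orig = bounce->bi_private;

        if (!bounce->bi_status && bio_data_dir(orig) == READ)
                bio_copy_data(orig, bounce);    /* dst, src */

        orig->bi_status = bounce->bi_status;
        bio_free_pages(bounce);
        bio_put(bounce);
        bio_endio(orig);
}
#endif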

struct bio_map_data {
        int is_our_pages;
        struct iov_iter iter;
        struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
                                               gfp_t gfp_mask)
{
        if (iov_count > UIO_MAXIOV)
                return NULL;

        return kmalloc(sizeof(struct bio_map_data) +
                       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_from_iter(bvec->bv_page,
                                          bvec->bv_offset,
                                          bvec->bv_len,
                                          &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page,
                                        bvec->bv_offset,
                                        bvec->bv_len,
                                        &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

void bio_free_pages(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i)
                __free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 *      bio_uncopy_user -       finish previously mapped bio
 *      @bio: bio being terminated
 *
 *      Free pages allocated from bio_copy_user_iov() and write back data
 *      to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
        kfree(bmd);
        bio_put(bio);
        return ret;
}

/**
 *      bio_copy_user_iov       -       copy user data to bio
 *      @q:             destination block queue
 *      @map_data:      pointer to the rq_map_data holding pages (if necessary)
 *      @iter:          iovec iterator
 *      @gfp_mask:      memory allocation flags
 *
 *      Prepares and returns a bio for indirect user io, bouncing data
 *      to/from kernel pages as necessary. Must be paired with a call to
 *      bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              const struct iov_iter *iter,
                              gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        for (i = 0; i < iter->nr_segs; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long) iter->iov[i].iov_base;
                end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                /*
                 * Overflow, abort
                 */
                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;
        }

        if (offset)
                nr_pages++;

        bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
        bmd->iter = *iter;
        bmd->iter.iov = bmd->iov;

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        ret = 0;

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                break;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
                        break;

                len -= bytes;
                offset = 0;
        }

        if (ret)
                goto cleanup;

        /*
         * success
         */
        if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, *iter);
                if (ret)
                        goto cleanup;
        }

        bio->bi_private = bmd;
        return bio;
cleanup:
        if (!map_data)
                bio_free_pages(bio);
        bio_put(bio);
out_bmd:
        kfree(bmd);
        return ERR_PTR(ret);
}

/**
 *      bio_map_user_iov - map user iovec into bio
 *      @q:             the struct request_queue for the bio
 *      @iter:          iovec iterator
 *      @gfp_mask:      memory allocation flags
 *
 *      Map the user space address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
                             const struct iov_iter *iter,
                             gfp_t gfp_mask)
{
        int j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;
        struct iov_iter i;
        struct iovec iov;
        struct bio_vec *bvec;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
                unsigned long len = iov.iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                /*
                 * Overflow, abort
                 */
                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least logical block size for now
                 */
                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
        if (!pages)
                goto out;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
                unsigned long len = iov.iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                (iter->type & WRITE) != WRITE,
                                &pages[cur_page]);
                if (unlikely(ret < local_nr_pages)) {
                        for (j = cur_page; j < page_limit; j++) {
                                if (!pages[j])
                                        break;
                                put_page(pages[j]);
                        }
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = offset_in_page(uaddr);
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;
                        unsigned short prev_bi_vcnt = bio->bi_vcnt;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        /*
                         * sorry...
                         */
                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                                            bytes)
                                break;

                        /*
                         * check if vector was merged with previous
                         * drop page reference if needed
                         */
                        if (bio->bi_vcnt == prev_bi_vcnt)
                                put_page(pages[j]);

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
                        put_page(pages[j++]);
        }

        kfree(pages);

        bio_set_flag(bio, BIO_USER_MAPPED);

        /*
         * subtle -- if bio_map_user_iov() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);
        return bio;

 out_unmap:
        bio_for_each_segment_all(bvec, bio, j) {
                put_page(bvec->bv_page);
        }
 out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /*
         * make sure we dirty pages we wrote to
         */
        bio_for_each_segment_all(bvec, bio, i) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                put_page(bvec->bv_page);
        }

        bio_put(bio);
}

/**
 *      bio_unmap_user  -       unmap a bio
 *      @bio:           the bio being unmapped
 *
 *      Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 *      process context.
 *
 *      bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
        bio_put(bio);
}

/**
 *      bio_map_kern    -       map kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to map
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio allocation
 *
 *      Map the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);
                        return ERR_PTR(-EINVAL);
                }

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}
EXPORT_SYMBOL(bio_map_kern);
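
/*
 * Illustrative sketch (hypothetical helper): wrapping a kmalloc'ed buffer
 * in a bio. The in-tree consumer of bio_map_kern() is blk_rq_map_kern();
 * this standalone variant is only meant to show the call pattern.
 */
#if 0
static int my_write_buf_sync(struct request_queue *q,
                             struct block_device *bdev, sector_t sector,
                             void *buf, unsigned int len)
{
        struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
        int ret;

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_WRITE;
        ret = submit_bio_wait(bio);     /* replaces the mapping's end_io */
        bio_put(bio);
        return ret;
}
#endif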

static void bio_copy_kern_endio(struct bio *bio)
{
        bio_free_pages(bio);
        bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
        char *p = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
                p += bvec->bv_len;
        }

        bio_copy_kern_endio(bio);
}

/**
 *      bio_copy_kern   -       copy kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to copy
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio and page allocation
 *      @reading: data direction is READ
 *
 *      copy the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                          gfp_t gfp_mask, int reading)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        struct bio *bio;
        void *p = data;
        int nr_pages = 0;

        /*
         * Overflow, abort
         */
        if (end < start)
                return ERR_PTR(-EINVAL);

        nr_pages = end - start;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        while (len) {
                struct page *page;
                unsigned int bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                page = alloc_page(q->bounce_gfp | gfp_mask);
                if (!page)
                        goto cleanup;

                if (!reading)
                        memcpy(page_address(page), p, bytes);

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;

                len -= bytes;
                p += bytes;
        }

        if (reading) {
                bio->bi_end_io = bio_copy_kern_endio_read;
                bio->bi_private = data;
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
        }

        return bio;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
        return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.   If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
1651
1652/*
1653 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1654 */
1655void bio_set_pages_dirty(struct bio *bio)
1656{
1657        struct bio_vec *bvec;
1658        int i;
1659
1660        bio_for_each_segment_all(bvec, bio, i) {
1661                struct page *page = bvec->bv_page;
1662
1663                if (page && !PageCompound(page))
1664                        set_page_dirty_lock(page);
1665        }
1666}
1667
1668static void bio_release_pages(struct bio *bio)
1669{
1670        struct bio_vec *bvec;
1671        int i;
1672
1673        bio_for_each_segment_all(bvec, bio, i) {
1674                struct page *page = bvec->bv_page;
1675
1676                if (page)
1677                        put_page(page);
1678        }
1679}
1680
1681/*
1682 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1683 * If they are, then fine.  If, however, some pages are clean then they must
1684 * have been written out during the direct-IO read.  So we take another ref on
1685 * the BIO and the offending pages and re-dirty the pages in process context.
1686 *
1687 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1688 * here on.  It will run one put_page() against each page and will run one
1689 * bio_put() against the BIO.
1690 */
1691
1692static void bio_dirty_fn(struct work_struct *work);
1693
1694static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1695static DEFINE_SPINLOCK(bio_dirty_lock);
1696static struct bio *bio_dirty_list;
1697
1698/*
1699 * This runs in process context
1700 */
1701static void bio_dirty_fn(struct work_struct *work)
1702{
1703        unsigned long flags;
1704        struct bio *bio;
1705
1706        spin_lock_irqsave(&bio_dirty_lock, flags);
1707        bio = bio_dirty_list;
1708        bio_dirty_list = NULL;
1709        spin_unlock_irqrestore(&bio_dirty_lock, flags);
1710
1711        while (bio) {
1712                struct bio *next = bio->bi_private;
1713
1714                bio_set_pages_dirty(bio);
1715                bio_release_pages(bio);
1716                bio_put(bio);
1717                bio = next;
1718        }
1719}
1720
1721void bio_check_pages_dirty(struct bio *bio)
1722{
1723        struct bio_vec *bvec;
1724        int nr_clean_pages = 0;
1725        int i;
1726
1727        bio_for_each_segment_all(bvec, bio, i) {
1728                struct page *page = bvec->bv_page;
1729
1730                if (PageDirty(page) || PageCompound(page)) {
1731                        put_page(page);
1732                        bvec->bv_page = NULL;
1733                } else {
1734                        nr_clean_pages++;
1735                }
1736        }
1737
1738        if (nr_clean_pages) {
1739                unsigned long flags;
1740
1741                spin_lock_irqsave(&bio_dirty_lock, flags);
1742                bio->bi_private = bio_dirty_list;
1743                bio_dirty_list = bio;
1744                spin_unlock_irqrestore(&bio_dirty_lock, flags);
1745                schedule_work(&bio_dirty_work);
1746        } else {
1747                bio_put(bio);
1748        }
1749}
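
/*
 * Example pairing (editor's sketch): a direct-IO read path marks the
 * pinned user pages dirty before submission and defers any re-dirtying
 * to the completion side, in the spirit of fs/direct-io.c.
 *
 * At submission time, for a READ into user-mapped pages:
 *
 *	bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 * and in the ->bi_end_io() handler, instead of dropping the page
 * references directly:
 *
 *	bio_check_pages_dirty(bio);
 *
 * After that call the bio and its pages must not be touched again: it
 * puts each page and the bio itself, either immediately or from the
 * deferred bio_dirty_work above.
 */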
1750
1751void generic_start_io_acct(struct request_queue *q, int rw,
1752                           unsigned long sectors, struct hd_struct *part)
1753{
1754        int cpu = part_stat_lock();
1755
1756        part_round_stats(q, cpu, part);
1757        part_stat_inc(cpu, part, ios[rw]);
1758        part_stat_add(cpu, part, sectors[rw], sectors);
1759        part_inc_in_flight(q, part, rw);
1760
1761        part_stat_unlock();
1762}
1763EXPORT_SYMBOL(generic_start_io_acct);
1764
1765void generic_end_io_acct(struct request_queue *q, int rw,
1766                         struct hd_struct *part, unsigned long start_time)
1767{
1768        unsigned long duration = jiffies - start_time;
1769        int cpu = part_stat_lock();
1770
1771        part_stat_add(cpu, part, ticks[rw], duration);
1772        part_round_stats(q, cpu, part);
1773        part_dec_in_flight(q, part, rw);
1774
1775        part_stat_unlock();
1776}
1777EXPORT_SYMBOL(generic_end_io_acct);
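
/*
 * Example (editor's sketch): a bio-based driver, in the style of
 * drivers/md/dm.c, brackets each bio with the two helpers above.  'q'
 * and 'disk' are assumed driver state:
 *
 *	unsigned long start_time = jiffies;
 *	int rw = bio_data_dir(bio);
 *
 *	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
 *	(actual processing of the bio)
 *	generic_end_io_acct(q, rw, &disk->part0, start_time);
 */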
1778
1779#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1780void bio_flush_dcache_pages(struct bio *bi)
1781{
1782        struct bio_vec bvec;
1783        struct bvec_iter iter;
1784
1785        bio_for_each_segment(bvec, bi, iter)
1786                flush_dcache_page(bvec.bv_page);
1787}
1788EXPORT_SYMBOL(bio_flush_dcache_pages);
1789#endif
1790
1791static inline bool bio_remaining_done(struct bio *bio)
1792{
1793        /*
1794         * If we're not chaining, then ->__bi_remaining is always 1 and
1795         * we always end io on the first invocation.
1796         */
1797        if (!bio_flagged(bio, BIO_CHAIN))
1798                return true;
1799
1800        BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1801
1802        if (atomic_dec_and_test(&bio->__bi_remaining)) {
1803                bio_clear_flag(bio, BIO_CHAIN);
1804                return true;
1805        }
1806
1807        return false;
1808}
1809
1810/**
1811 * bio_endio - end I/O on a bio
1812 * @bio:        bio
1813 *
1814 * Description:
1815 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1816 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1817 *   bio unless they own it and thus know that it has an end_io function.
1818 *
1819 *   bio_endio() can be called several times on a bio that has been chained
1820 *   using bio_chain().  The ->bi_end_io() function will only be called the
1821 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1822 *   generated if BIO_TRACE_COMPLETION is set.
1823 **/
1824void bio_endio(struct bio *bio)
1825{
1826again:
1827        if (!bio_remaining_done(bio))
1828                return;
1829        if (!bio_integrity_endio(bio))
1830                return;
1831
1832        /*
1833         * Need to have a real endio function for chained bios, otherwise
1834         * various corner cases will break (like stacking block devices that
1835         * save/restore bi_end_io) - however, we want to avoid unbounded
1836         * recursion and blowing the stack. Tail call optimization would
1837         * handle this, but compiling with frame pointers also disables
1838         * gcc's sibling call optimization.
1839         */
1840        if (bio->bi_end_io == bio_chain_endio) {
1841                bio = __bio_chain_endio(bio);
1842                goto again;
1843        }
1844
1845        if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1846                trace_block_bio_complete(bio->bi_disk->queue, bio,
1847                                         blk_status_to_errno(bio->bi_status));
1848                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1849        }
1850
1851        blk_throtl_bio_endio(bio);
1852        /* release cgroup info */
1853        bio_uninit(bio);
1854        if (bio->bi_end_io)
1855                bio->bi_end_io(bio);
1856}
1857EXPORT_SYMBOL(bio_endio);
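
/*
 * Example (editor's sketch): a driver completing a bio it owns sets
 * bi_status and calls bio_endio(); it must not invoke ->bi_end_io()
 * by hand.  'struct my_io' is a hypothetical per-command structure:
 *
 *	static void my_io_done(struct my_io *io, blk_status_t status)
 *	{
 *		struct bio *bio = io->orig_bio;
 *
 *		bio->bi_status = status;
 *		bio_endio(bio);
 *		kfree(io);
 *	}
 */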
1858
1859/**
1860 * bio_split - split a bio
1861 * @bio:        bio to split
1862 * @sectors:    number of sectors to split from the front of @bio
1863 * @gfp:        gfp mask
1864 * @bs:         bio set to allocate from
1865 *
1866 * Allocates and returns a new bio which represents @sectors from the start of
1867 * @bio, and updates @bio to represent the remaining sectors.
1868 *
1869 * Unless this is a discard request the newly allocated bio will point
1870 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1871 * @bio is not freed before the split.
1872 */
1873struct bio *bio_split(struct bio *bio, int sectors,
1874                      gfp_t gfp, struct bio_set *bs)
1875{
1876        struct bio *split = NULL;
1877
1878        BUG_ON(sectors <= 0);
1879        BUG_ON(sectors >= bio_sectors(bio));
1880
1881        split = bio_clone_fast(bio, gfp, bs);
1882        if (!split)
1883                return NULL;
1884
1885        split->bi_iter.bi_size = sectors << 9;
1886
1887        if (bio_integrity(split))
1888                bio_integrity_trim(split);
1889
1890        bio_advance(bio, split->bi_iter.bi_size);
1891
1892        if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1893                bio_set_flag(split, BIO_TRACE_COMPLETION);
1894
1895        return split;
1896}
1897EXPORT_SYMBOL(bio_split);
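
/*
 * Example (editor's sketch): the canonical split-and-requeue pattern,
 * essentially what blk_queue_split() does when a bio exceeds the queue
 * limits.  'max_sectors' and the bio_set 'bs' are assumed:
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors,
 *					      GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *		bio = split;
 *	}
 *
 * generic_make_request() requeues the trimmed-down remainder while the
 * caller carries on with the front piece; chaining makes completions of
 * the two pieces propagate correctly.
 */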
1898
1899/**
1900 * bio_trim - trim a bio
1901 * @bio:        bio to trim
1902 * @offset:     number of sectors to trim from the front of @bio
1903 * @size:       size we want to trim @bio to, in sectors
1904 */
1905void bio_trim(struct bio *bio, int offset, int size)
1906{
1907        /* 'bio' is a cloned bio which we need to trim to match
1908         * the given offset and size.
1909         */
1910
1911        size <<= 9;
1912        if (offset == 0 && size == bio->bi_iter.bi_size)
1913                return;
1914
1915        bio_clear_flag(bio, BIO_SEG_VALID);
1916
1917        bio_advance(bio, offset << 9);
1918
1919        bio->bi_iter.bi_size = size;
1920
1921        if (bio_integrity(bio))
1922                bio_integrity_trim(bio);
1923
1924}
1925EXPORT_SYMBOL_GPL(bio_trim);
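
/*
 * Example (editor's sketch): a stacking driver that wants to send only a
 * sub-range of an incoming bio can clone it cheaply and trim the clone.
 * Since bio_clone_fast() shares the parent's bi_io_vec, the parent must
 * stay alive until the clone completes:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	if (clone)
 *		bio_trim(clone, offset_sectors, nr_sectors);
 */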
1926
1927/*
1928 * Create memory pools for biovecs in a bio_set.
1929 * Use the global biovec slabs created for general use.
1930 */
1931mempool_t *biovec_create_pool(int pool_entries)
1932{
1933        struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1934
1935        return mempool_create_slab_pool(pool_entries, bp->slab);
1936}
1937
1938void bioset_free(struct bio_set *bs)
1939{
1940        if (bs->rescue_workqueue)
1941                destroy_workqueue(bs->rescue_workqueue);
1942
1943        if (bs->bio_pool)
1944                mempool_destroy(bs->bio_pool);
1945
1946        if (bs->bvec_pool)
1947                mempool_destroy(bs->bvec_pool);
1948
1949        bioset_integrity_free(bs);
1950        bio_put_slab(bs);
1951
1952        kfree(bs);
1953}
1954EXPORT_SYMBOL(bioset_free);
1955
1956/**
1957 * bioset_create  - Create a bio_set
1958 * @pool_size:  Number of bio and bio_vecs to cache in the mempool
1959 * @front_pad:  Number of bytes to allocate in front of the returned bio
1960 * @flags:      Flags to modify behavior, currently %BIOSET_NEED_BVECS
1961 *              and %BIOSET_NEED_RESCUER
1962 *
1963 * Description:
1964 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1965 *    to ask for a number of bytes to be allocated in front of the bio.
1966 *    Front pad allocation is useful for embedding the bio inside
1967 *    another structure, to avoid allocating extra data to go with the bio.
1968 *    Note that the bio must be embedded at the END of that structure always,
1969 *    or things will break badly.
1970 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1971 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1972 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1973 *    dispatch queued requests when the mempool runs out of space.
1974 *
1975 */
1976struct bio_set *bioset_create(unsigned int pool_size,
1977                              unsigned int front_pad,
1978                              int flags)
1979{
1980        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1981        struct bio_set *bs;
1982
1983        bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1984        if (!bs)
1985                return NULL;
1986
1987        bs->front_pad = front_pad;
1988
1989        spin_lock_init(&bs->rescue_lock);
1990        bio_list_init(&bs->rescue_list);
1991        INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1992
1993        bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1994        if (!bs->bio_slab) {
1995                kfree(bs);
1996                return NULL;
1997        }
1998
1999        bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
2000        if (!bs->bio_pool)
2001                goto bad;
2002
2003        if (flags & BIOSET_NEED_BVECS) {
2004                bs->bvec_pool = biovec_create_pool(pool_size);
2005                if (!bs->bvec_pool)
2006                        goto bad;
2007        }
2008
2009        if (!(flags & BIOSET_NEED_RESCUER))
2010                return bs;
2011
2012        bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
2013        if (!bs->rescue_workqueue)
2014                goto bad;
2015
2016        return bs;
2017bad:
2018        bioset_free(bs);
2019        return NULL;
2020}
2021EXPORT_SYMBOL(bioset_create);
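
/*
 * Example (editor's sketch): using @front_pad to embed the bio in a
 * driver-private structure.  'struct my_io' is hypothetical; note the
 * bio sits at the end, as required above:
 *
 *	struct my_io {
 *		struct my_dev	*dev;
 *		struct bio	bio;
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio),
 *			   BIOSET_NEED_BVECS);
 *
 * and at allocation time the private part is recovered via container_of:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	io = container_of(bio, struct my_io, bio);
 */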
2022
2023#ifdef CONFIG_BLK_CGROUP
2024
2025/**
2026 * bio_associate_blkcg - associate a bio with the specified blkcg
2027 * @bio: target bio
2028 * @blkcg_css: css of the blkcg to associate
2029 *
2030 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
2031 * treat @bio as if it were issued by a task which belongs to the blkcg.
2032 *
2033 * This function takes an extra reference of @blkcg_css which will be put
2034 * when @bio is released.  The caller must own @bio and is responsible for
2035 * synchronizing calls to this function.
2036 */
2037int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
2038{
2039        if (unlikely(bio->bi_css))
2040                return -EBUSY;
2041        css_get(blkcg_css);
2042        bio->bi_css = blkcg_css;
2043        return 0;
2044}
2045EXPORT_SYMBOL_GPL(bio_associate_blkcg);
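
/*
 * Example (editor's sketch): tagging a bio with the issuing task's blkcg
 * before handing it off to a helper thread.  bio_associate_blkcg() grabs
 * its own css reference, so the caller's temporary one can be dropped:
 *
 *	struct cgroup_subsys_state *css = task_get_css(current, io_cgrp_id);
 *
 *	bio_associate_blkcg(bio, css);
 *	css_put(css);
 */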
2046
2047/**
2048 * bio_associate_current - associate a bio with %current
2049 * @bio: target bio
2050 *
2051 * Associate @bio with %current if it hasn't been associated yet.  Block
2052 * layer will treat @bio as if it were issued by %current no matter which
2053 * task actually issues it.
2054 *
2055 * This function takes an extra reference of @task's io_context and blkcg
2056 * which will be put when @bio is released.  The caller must own @bio,
2057 * ensure %current->io_context exists, and is responsible for synchronizing
2058 * calls to this function.
2059 */
2060int bio_associate_current(struct bio *bio)
2061{
2062        struct io_context *ioc;
2063
2064        if (bio->bi_css)
2065                return -EBUSY;
2066
2067        ioc = current->io_context;
2068        if (!ioc)
2069                return -ENOENT;
2070
2071        get_io_context_active(ioc);
2072        bio->bi_ioc = ioc;
2073        bio->bi_css = task_get_css(current, io_cgrp_id);
2074        return 0;
2075}
2076EXPORT_SYMBOL_GPL(bio_associate_current);
2077
2078/**
2079 * bio_disassociate_task - undo bio_associate_current()
2080 * @bio: target bio
2081 */
2082void bio_disassociate_task(struct bio *bio)
2083{
2084        if (bio->bi_ioc) {
2085                put_io_context(bio->bi_ioc);
2086                bio->bi_ioc = NULL;
2087        }
2088        if (bio->bi_css) {
2089                css_put(bio->bi_css);
2090                bio->bi_css = NULL;
2091        }
2092}
2093
2094/**
2095 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
2096 * @dst: destination bio
2097 * @src: source bio
2098 */
2099void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
2100{
2101        if (src->bi_css)
2102                WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2103}
2104EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
2105#endif /* CONFIG_BLK_CGROUP */
2106
2107static void __init biovec_init_slabs(void)
2108{
2109        int i;
2110
2111        for (i = 0; i < BVEC_POOL_NR; i++) {
2112                int size;
2113                struct biovec_slab *bvs = bvec_slabs + i;
2114
2115                if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2116                        bvs->slab = NULL;
2117                        continue;
2118                }
2119
2120                size = bvs->nr_vecs * sizeof(struct bio_vec);
2121                bvs->slab = kmem_cache_create(bvs->name, size, 0,
2122                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2123        }
2124}
2125
2126static int __init init_bio(void)
2127{
2128        bio_slab_max = 2;
2129        bio_slab_nr = 0;
2130        bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2131        if (!bio_slabs)
2132                panic("bio: can't allocate bios\n");
2133
2134        bio_integrity_init();
2135        biovec_init_slabs();
2136
2137        fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
2138        if (!fs_bio_set)
2139                panic("bio: can't allocate bios\n");
2140
2141        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2142                panic("bio: can't create integrity pool\n");
2143
2144        return 0;
2145}
2146subsys_initcall(init_bio);
2147