linux/block/bio.c
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS         4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
        char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
        struct bio_slab *bslab, *new_bio_slabs;
        unsigned int new_bio_slab_max;
        unsigned int i, entry = -1;

        mutex_lock(&bio_slab_lock);

        i = 0;
        while (i < bio_slab_nr) {
                bslab = &bio_slabs[i];

                if (!bslab->slab && entry == -1)
                        entry = i;
                else if (bslab->slab_size == sz) {
                        slab = bslab->slab;
                        bslab->slab_ref++;
                        break;
                }
                i++;
        }

        if (slab)
                goto out_unlock;

        if (bio_slab_nr == bio_slab_max && entry == -1) {
                new_bio_slab_max = bio_slab_max << 1;
                new_bio_slabs = krealloc(bio_slabs,
                                         new_bio_slab_max * sizeof(struct bio_slab),
                                         GFP_KERNEL);
                if (!new_bio_slabs)
                        goto out_unlock;
                bio_slab_max = new_bio_slab_max;
                bio_slabs = new_bio_slabs;
        }
        if (entry == -1)
                entry = bio_slab_nr++;

        bslab = &bio_slabs[entry];

        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
        slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!slab)
                goto out_unlock;

        bslab->slab = slab;
        bslab->slab_ref = 1;
        bslab->slab_size = sz;
out_unlock:
        mutex_unlock(&bio_slab_lock);
        return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
        struct bio_slab *bslab = NULL;
        unsigned int i;

        mutex_lock(&bio_slab_lock);

        for (i = 0; i < bio_slab_nr; i++) {
                if (bs->bio_slab == bio_slabs[i].slab) {
                        bslab = &bio_slabs[i];
                        break;
                }
        }

        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
                goto out;

        WARN_ON(!bslab->slab_ref);

        if (--bslab->slab_ref)
                goto out;

        kmem_cache_destroy(bslab->slab);
        bslab->slab = NULL;

out:
        mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
        if (!idx)
                return;
        idx--;

        BIO_BUG_ON(idx >= BVEC_POOL_NR);

        if (idx == BVEC_POOL_MAX) {
                mempool_free(bv, pool);
        } else {
                struct biovec_slab *bvs = bvec_slabs + idx;

                kmem_cache_free(bvs->slab, bv);
        }
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
                           mempool_t *pool)
{
        struct bio_vec *bvl;

        /*
         * see comment near the bvec_slabs define!
         */
        switch (nr) {
        case 1:
                *idx = 0;
                break;
        case 2 ... 4:
                *idx = 1;
                break;
        case 5 ... 16:
                *idx = 2;
                break;
        case 17 ... 64:
                *idx = 3;
                break;
        case 65 ... 128:
                *idx = 4;
                break;
        case 129 ... BIO_MAX_PAGES:
                *idx = 5;
                break;
        default:
                return NULL;
        }

        /*
         * idx now points to the pool we want to allocate from. only the
         * BIO_MAX_PAGES entry pool is mempool backed.
         */
        if (*idx == BVEC_POOL_MAX) {
fallback:
                bvl = mempool_alloc(pool, gfp_mask);
        } else {
                struct biovec_slab *bvs = bvec_slabs + *idx;
                gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

                /*
                 * Make this allocation restricted and don't dump info on
                 * allocation failures, since we'll fall back to the mempool
                 * in case of failure.
                 */
                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

                /*
                 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
                 * is set, retry with the 1-entry mempool
                 */
                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
                if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
                        *idx = BVEC_POOL_MAX;
                        goto fallback;
                }
        }

        (*idx)++;
        return bvl;
}

static void __bio_free(struct bio *bio)
{
        bio_disassociate_task(bio);

        if (bio_integrity(bio))
                bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
        struct bio_set *bs = bio->bi_pool;
        void *p;

        __bio_free(bio);

        if (bs) {
                bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

                /*
                 * If we have front padding, adjust the bio pointer before freeing
                 */
                p = bio;
                p -= bs->front_pad;

                mempool_free(p, bs->bio_pool);
        } else {
                /* Bio was allocated by bio_kmalloc() */
                kfree(bio);
        }
}

void bio_init(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
        atomic_set(&bio->__bi_remaining, 1);
        atomic_set(&bio->__bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:        bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

        __bio_free(bio);

        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags;
        atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
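
/*
 * Illustrative usage, a sketch rather than code from this file: a driver
 * that recycles one long-lived bio resets it between submissions instead
 * of freeing and reallocating (bdev, sector and my_end_io are assumed,
 * caller-provided names):
 *
 *      bio_reset(bio);
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 *      bio->bi_end_io = my_end_io;
 *      submit_bio(bio);
 */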

static struct bio *__bio_chain_endio(struct bio *bio)
{
        struct bio *parent = bio->bi_private;

        if (!parent->bi_error)
                parent->bi_error = bio->bi_error;
        bio_put(bio);
        return parent;
}

static void bio_chain_endio(struct bio *bio)
{
        bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
        BUG_ON(bio->bi_private || bio->bi_end_io);

        bio->bi_private = parent;
        bio->bi_end_io  = bio_chain_endio;
        bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
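
/*
 * Illustrative usage, a sketch: a driver splitting a bio submits the front
 * half chained to the remainder, so the parent only completes once both
 * halves have (sectors is an assumed, caller-computed split point):
 *
 *      split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
 *      bio_chain(split, bio);
 *      generic_make_request(split);
 *      generic_make_request(bio);
 */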

static void bio_alloc_rescue(struct work_struct *work)
{
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
        struct bio *bio;

        while (1) {
                spin_lock(&bs->rescue_lock);
                bio = bio_list_pop(&bs->rescue_list);
                spin_unlock(&bs->rescue_lock);

                if (!bio)
                        break;

                generic_make_request(bio);
        }
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
        struct bio_list punt, nopunt;
        struct bio *bio;

        /*
         * In order to guarantee forward progress we must punt only bios that
         * were allocated from this bio_set; otherwise, if there was a bio on
         * there for a stacking driver higher up in the stack, processing it
         * could require allocating bios from this bio_set, and doing that from
         * our own rescuer would be bad.
         *
         * Since bio lists are singly linked, pop them all instead of trying to
         * remove from the middle of the list:
         */

        bio_list_init(&punt);
        bio_list_init(&nopunt);

        while ((bio = bio_list_pop(current->bio_list)))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

        *current->bio_list = nopunt;

        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
        spin_unlock(&bs->rescue_lock);

        queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:  number of iovecs to pre-allocate
 * @bs:         the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
        gfp_t saved_gfp = gfp_mask;
        unsigned front_pad;
        unsigned inline_vecs;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        if (!bs) {
                if (nr_iovecs > UIO_MAXIOV)
                        return NULL;

                p = kmalloc(sizeof(struct bio) +
                            nr_iovecs * sizeof(struct bio_vec),
                            gfp_mask);
                front_pad = 0;
                inline_vecs = nr_iovecs;
        } else {
                /* should not use nobvec bioset for nr_iovecs > 0 */
                if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
                        return NULL;
                /*
                 * generic_make_request() converts recursion to iteration; this
                 * means if we're running beneath it, any bios we allocate and
                 * submit will not be submitted (and thus freed) until after we
                 * return.
                 *
                 * This exposes us to a potential deadlock if we allocate
                 * multiple bios from the same bio_set() while running
                 * underneath generic_make_request(). If we were to allocate
                 * multiple bios (say a stacking block driver that was splitting
                 * bios), we would deadlock if we exhausted the mempool's
                 * reserve.
                 *
                 * We solve this, and guarantee forward progress, with a rescuer
                 * workqueue per bio_set. If we go to allocate and there are
                 * bios on current->bio_list, we first try the allocation
                 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
                 * bios we would be blocking to the rescuer workqueue before
                 * we retry with the original gfp_flags.
                 */

                if (current->bio_list && !bio_list_empty(current->bio_list))
                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;

                p = mempool_alloc(bs->bio_pool, gfp_mask);
                if (!p && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        p = mempool_alloc(bs->bio_pool, gfp_mask);
                }

                front_pad = bs->front_pad;
                inline_vecs = BIO_INLINE_VECS;
        }

        if (unlikely(!p))
                return NULL;

        bio = p + front_pad;
        bio_init(bio);

        if (nr_iovecs > inline_vecs) {
                unsigned long idx = 0;

                bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                if (!bvl && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                }

                if (unlikely(!bvl))
                        goto err_free;

                bio->bi_flags |= idx << BVEC_POOL_OFFSET;
        } else if (nr_iovecs) {
                bvl = bio->bi_inline_vecs;
        }

        bio->bi_pool = bs;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
        return bio;

err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
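
/*
 * Illustrative usage, a sketch: allocate a one-segment bio from the shared
 * fs_bio_set and point it at a device (bdev and sector are assumed,
 * caller-provided values):
 *
 *      struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, fs_bio_set);
 *
 *      if (!bio)
 *              return -ENOMEM;
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 */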

void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                char *data = bvec_kmap_irq(&bv, &flags);
                memset(data, 0, bv.bv_len);
                flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        if (!bio_flagged(bio, BIO_REFFED))
                bio_free(bio);
        else {
                BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

                /*
                 * last put frees it
                 */
                if (atomic_dec_and_test(&bio->__bi_cnt))
                        bio_free(bio);
        }
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 *      __bio_clone_fast - clone a bio that shares the original bio's biovec
 *      @bio: destination bio
 *      @bio_src: bio to clone
 *
 *      Clone a &bio. Caller will own the returned bio, but not
 *      the actual data it points to. Reference count of returned
 *      bio will be one.
 *
 *      Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
        BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_bdev = bio_src->bi_bdev;
        bio_set_flag(bio, BIO_CLONED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;

        bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *      bio_clone_fast - clone a bio that shares the original bio's biovec
 *      @bio: bio to clone
 *      @gfp_mask: allocation priority
 *      @bs: bio_set to allocate from
 *
 *      Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
        struct bio *b;

        b = bio_alloc_bioset(gfp_mask, 0, bs);
        if (!b)
                return NULL;

        __bio_clone_fast(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, gfp_mask);

                if (ret < 0) {
                        bio_put(b);
                        return NULL;
                }
        }

        return b;
}
EXPORT_SYMBOL(bio_clone_fast);
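
/*
 * Illustrative usage, a sketch: a stacking driver redirects an incoming bio
 * via a fast clone (my_bioset, target_bdev and my_clone_endio are assumed,
 * driver-private names):
 *
 *      clone = bio_clone_fast(bio, GFP_NOIO, my_bioset);
 *      if (!clone)
 *              return -ENOMEM;
 *      clone->bi_bdev = target_bdev;
 *      clone->bi_end_io = my_clone_endio;
 *      generic_make_request(clone);
 */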

/**
 *      bio_clone_bioset - clone a bio
 *      @bio_src: bio to clone
 *      @gfp_mask: allocation priority
 *      @bs: bio_set to allocate from
 *
 *      Clone bio. Caller will own the returned bio, but not the actual data it
 *      points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
                             struct bio_set *bs)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        struct bio *bio;

        /*
         * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
         * bio_src->bi_io_vec to bio->bi_io_vec.
         *
         * We can't do that anymore, because:
         *
         *  - The point of cloning the biovec is to produce a bio with a biovec
         *    the caller can modify: bi_idx and bi_bvec_done should be 0.
         *
         *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
         *    we tried to clone the whole thing bio_alloc_bioset() would fail.
         *    But the clone should succeed as long as the number of biovecs we
         *    actually need to allocate is fewer than BIO_MAX_PAGES.
         *
         *  - Lastly, bi_vcnt should not be looked at or relied upon by code
         *    that does not own the bio - reason being drivers don't use it for
         *    iterating over the biovec anymore, so expecting it to be kept up
         *    to date (i.e. for clones that share the parent biovec) is just
         *    asking for trouble and would force extra work on
         *    __bio_clone_fast() anyways.
         */

        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
        if (!bio)
                return NULL;
        bio->bi_bdev            = bio_src->bi_bdev;
        bio->bi_opf             = bio_src->bi_opf;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                break;
        case REQ_OP_WRITE_SAME:
                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
                break;
        default:
                bio_for_each_segment(bv, bio_src, iter)
                        bio->bi_io_vec[bio->bi_vcnt++] = bv;
                break;
        }

        if (bio_integrity(bio_src)) {
                int ret;

                ret = bio_integrity_clone(bio, bio_src, gfp_mask);
                if (ret < 0) {
                        bio_put(bio);
                        return NULL;
                }
        }

        bio_clone_blkcg_association(bio, bio_src);

        return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 *      bio_add_pc_page -       attempt to add page to bio
 *      @q: the target queue
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This can fail for a
 *      number of reasons, such as the bio being full or target block device
 *      limitations. The target block device must allow bio's up to PAGE_SIZE,
 *      so it is always possible to add a single page to an empty bio.
 *
 *      This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
                    *page, unsigned int len, unsigned int offset)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        goto done;
                }

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvec_gap_to_prev(q, prev, offset))
                        return 0;
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;
        bio->bi_vcnt++;
        bio->bi_phys_segments++;
        bio->bi_iter.bi_size += len;

        /*
         * Perform a recount if the number of segments is greater
         * than queue_max_segments(q).
         */

        while (bio->bi_phys_segments > queue_max_segments(q)) {

                if (retried_segments)
                        goto failed;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio_clear_flag(bio, BIO_SEG_VALID);

 done:
        return len;

 failed:
        bvec->bv_page = NULL;
        bvec->bv_len = 0;
        bvec->bv_offset = 0;
        bio->bi_vcnt--;
        bio->bi_iter.bi_size -= len;
        blk_recount_segments(q, bio);
        return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 *      bio_add_page    -       attempt to add page to bio
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This will only fail
 *      if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
                 unsigned int len, unsigned int offset)
{
        struct bio_vec *bv;

        /*
         * cloned bio must not modify vec list
         */
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == bv->bv_page &&
                    offset == bv->bv_offset + bv->bv_len) {
                        bv->bv_len += len;
                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        bv              = &bio->bi_io_vec[bio->bi_vcnt];
        bv->bv_page     = page;
        bv->bv_len      = len;
        bv->bv_offset   = offset;

        bio->bi_vcnt++;
done:
        bio->bi_iter.bi_size += len;
        return len;
}
EXPORT_SYMBOL(bio_add_page);
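
/*
 * Illustrative usage, a sketch: fill a bio from a caller-provided page
 * array (pages and nr_pages are assumed names). bio_add_page() returns the
 * number of bytes added, so anything short of PAGE_SIZE means the bio is
 * full:
 *
 *      bio = bio_alloc(GFP_KERNEL, nr_pages);
 *      for (i = 0; i < nr_pages; i++)
 *              if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *                      break;
 */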

struct submit_bio_ret {
        struct completion event;
        int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
        struct submit_bio_ret *ret = bio->bi_private;

        ret->error = bio->bi_error;
        complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
        struct submit_bio_ret ret;

        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
        bio->bi_opf |= REQ_SYNC;
        submit_bio(bio);
        wait_for_completion_io(&ret.event);

        return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
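
/*
 * Illustrative usage, a sketch: synchronously read one page (bdev, sector
 * and page are assumed, caller-provided values):
 *
 *      bio = bio_alloc(GFP_KERNEL, 1);
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *      bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *      err = submit_bio_wait(bio);
 *      bio_put(bio);
 */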

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:        bio to advance
 * @bytes:      number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);

        bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
 * freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
        int i;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, i) {
                bv->bv_page = alloc_page(gfp_mask);
                if (!bv->bv_page) {
                        while (--bv >= bio->bi_io_vec)
                                __free_page(bv->bv_page);
                        return -ENOMEM;
                }
        }

        return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);
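
/*
 * Illustrative usage, a sketch: back every bvec of a freshly built bounce
 * bio with its own page, releasing them later with bio_free_pages()
 * (err_put_bio is an assumed cleanup label):
 *
 *      if (bio_alloc_pages(bio, GFP_NOIO))
 *              goto err_put_bio;
 */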

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
        struct bvec_iter src_iter, dst_iter;
        struct bio_vec src_bv, dst_bv;
        void *src_p, *dst_p;
        unsigned bytes;

        src_iter = src->bi_iter;
        dst_iter = dst->bi_iter;

        while (1) {
                if (!src_iter.bi_size) {
                        src = src->bi_next;
                        if (!src)
                                break;

                        src_iter = src->bi_iter;
                }

                if (!dst_iter.bi_size) {
                        dst = dst->bi_next;
                        if (!dst)
                                break;

                        dst_iter = dst->bi_iter;
                }

                src_bv = bio_iter_iovec(src, src_iter);
                dst_bv = bio_iter_iovec(dst, dst_iter);

                bytes = min(src_bv.bv_len, dst_bv.bv_len);

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                       src_p + src_bv.bv_offset,
                       bytes);

                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);

                bio_advance_iter(src, &src_iter, bytes);
                bio_advance_iter(dst, &dst_iter, bytes);
        }
}
EXPORT_SYMBOL(bio_copy_data);
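
/*
 * Illustrative usage, a sketch: on completion of a bounced read, copy the
 * bounce bio's payload back into the original bio and drop the bounce
 * pages (orig_bio and bounce_bio are assumed names):
 *
 *      bio_copy_data(orig_bio, bounce_bio);
 *      bio_free_pages(bounce_bio);
 *      bio_put(bounce_bio);
 */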

struct bio_map_data {
        int is_our_pages;
        struct iov_iter iter;
        struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
                                               gfp_t gfp_mask)
{
        if (iov_count > UIO_MAXIOV)
                return NULL;

        return kmalloc(sizeof(struct bio_map_data) +
                       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_from_iter(bvec->bv_page,
                                          bvec->bv_offset,
                                          bvec->bv_len,
                                          &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, i) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page,
                                        bvec->bv_offset,
                                        bvec->bv_len,
                                        &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

void bio_free_pages(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i)
                __free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 *      bio_uncopy_user -       finish previously mapped bio
 *      @bio: bio being terminated
 *
 *      Free pages allocated from bio_copy_user_iov() and write back data
 *      to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
        kfree(bmd);
        bio_put(bio);
        return ret;
}

/**
 *      bio_copy_user_iov       -       copy user data to bio
 *      @q:             destination block queue
 *      @map_data:      pointer to the rq_map_data holding pages (if necessary)
 *      @iter:          iovec iterator
 *      @gfp_mask:      memory allocation flags
 *
 *      Prepares and returns a bio for indirect user io, bouncing data
 *      to/from kernel pages as necessary. Must be paired with a call to
 *      bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              const struct iov_iter *iter,
                              gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        for (i = 0; i < iter->nr_segs; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long) iter->iov[i].iov_base;
                end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
                        >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                /*
                 * Overflow, abort
                 */
                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;
        }

        if (offset)
                nr_pages++;

        bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = map_data ? 0 : 1;
        memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
        iov_iter_init(&bmd->iter, iter->type, bmd->iov,
                        iter->nr_segs, iter->count);

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        if (iter->type & WRITE)
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        ret = 0;

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                break;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
                        break;

                len -= bytes;
                offset = 0;
        }

        if (ret)
                goto cleanup;

        /*
         * success
         */
        if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, *iter);
                if (ret)
                        goto cleanup;
        }

        bio->bi_private = bmd;
        return bio;
cleanup:
        if (!map_data)
                bio_free_pages(bio);
        bio_put(bio);
out_bmd:
        kfree(bmd);
        return ERR_PTR(ret);
}

/**
 *      bio_map_user_iov - map user iovec into bio
 *      @q:             the struct request_queue for the bio
 *      @iter:          iovec iterator
 *      @gfp_mask:      memory allocation flags
 *
 *      Map the user space address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
                             const struct iov_iter *iter,
                             gfp_t gfp_mask)
{
        int j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;
        struct iov_iter i;
        struct iovec iov;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
                unsigned long len = iov.iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                /*
                 * Overflow, abort
                 */
                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least logical block size for now
                 */
                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
        if (!pages)
                goto out;

        iov_for_each(iov, i, *iter) {
                unsigned long uaddr = (unsigned long) iov.iov_base;
                unsigned long len = iov.iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                (iter->type & WRITE) != WRITE,
                                &pages[cur_page]);
                if (ret < local_nr_pages) {
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = offset_in_page(uaddr);
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        /*
                         * sorry...
                         */
                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                                            bytes)
                                break;

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
                        put_page(pages[j++]);
        }

        kfree(pages);

        /*
         * set data direction, and check if mapped pages need bouncing
         */
        if (iter->type & WRITE)
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        bio_set_flag(bio, BIO_USER_MAPPED);

        /*
         * subtle -- if __bio_map_user() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);
        return bio;

 out_unmap:
        for (j = 0; j < nr_pages; j++) {
                if (!pages[j])
                        break;
                put_page(pages[j]);
        }
 out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /*
         * make sure we dirty pages we wrote to
         */
        bio_for_each_segment_all(bvec, bio, i) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                put_page(bvec->bv_page);
        }

        bio_put(bio);
}

/**
 *      bio_unmap_user  -       unmap a bio
 *      @bio:           the bio being unmapped
 *
 *      Unmap a bio previously mapped by bio_map_user(). Must be called from
 *      process context.
 *
 *      bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
        bio_put(bio);
}

/**
 *      bio_map_kern    -       map kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to map
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio allocation
 *
 *      Map the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);
                        return ERR_PTR(-EINVAL);
                }

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}
EXPORT_SYMBOL(bio_map_kern);
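
/*
 * Illustrative usage, a sketch: wrap a kernel buffer in a bio and submit it
 * as a write (q, buf, len, bdev and sector are assumed, caller-provided
 * values; the buffer must stay allocated until completion):
 *
 *      bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *      if (IS_ERR(bio))
 *              return PTR_ERR(bio);
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 *      bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 *      submit_bio(bio);
 */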
1460
1461static void bio_copy_kern_endio(struct bio *bio)
1462{
1463        bio_free_pages(bio);
1464        bio_put(bio);
1465}
1466
1467static void bio_copy_kern_endio_read(struct bio *bio)
1468{
1469        char *p = bio->bi_private;
1470        struct bio_vec *bvec;
1471        int i;
1472
1473        bio_for_each_segment_all(bvec, bio, i) {
1474                memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1475                p += bvec->bv_len;
1476        }
1477
1478        bio_copy_kern_endio(bio);
1479}
1480
1481/**
1482 *      bio_copy_kern   -       copy kernel address into bio
1483 *      @q: the struct request_queue for the bio
1484 *      @data: pointer to buffer to copy
1485 *      @len: length in bytes
1486 *      @gfp_mask: allocation flags for bio and page allocation
1487 *      @reading: data direction is READ
1488 *
1489 *      copy the kernel address into a bio suitable for io to a block
1490 *      device. Returns an error pointer in case of error.
1491 */
1492struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1493                          gfp_t gfp_mask, int reading)
1494{
1495        unsigned long kaddr = (unsigned long)data;
1496        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1497        unsigned long start = kaddr >> PAGE_SHIFT;
1498        struct bio *bio;
1499        void *p = data;
1500        int nr_pages = 0;
1501
1502        /*
1503         * Overflow, abort
1504         */
1505        if (end < start)
1506                return ERR_PTR(-EINVAL);
1507
1508        nr_pages = end - start;
1509        bio = bio_kmalloc(gfp_mask, nr_pages);
1510        if (!bio)
1511                return ERR_PTR(-ENOMEM);
1512
1513        while (len) {
1514                struct page *page;
1515                unsigned int bytes = PAGE_SIZE;
1516
1517                if (bytes > len)
1518                        bytes = len;
1519
1520                page = alloc_page(q->bounce_gfp | gfp_mask);
1521                if (!page)
1522                        goto cleanup;
1523
1524                if (!reading)
1525                        memcpy(page_address(page), p, bytes);
1526
1527                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1528                        break;
1529
1530                len -= bytes;
1531                p += bytes;
1532        }
1533
1534        if (reading) {
1535                bio->bi_end_io = bio_copy_kern_endio_read;
1536                bio->bi_private = data;
1537        } else {
1538                bio->bi_end_io = bio_copy_kern_endio;
1539                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1540        }
1541
1542        return bio;
1543
1544cleanup:
1545        bio_free_pages(bio);
1546        bio_put(bio);
1547        return ERR_PTR(-ENOMEM);
1548}
1549
1550/*
1551 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1552 * for performing direct-IO in BIOs.
1553 *
1554 * The problem is that we cannot run set_page_dirty() from interrupt context
1555 * because the required locks are not interrupt-safe.  So what we can do is to
1556 * mark the pages dirty _before_ performing IO.  And in interrupt context,
1557 * check that the pages are still dirty.   If so, fine.  If not, redirty them
1558 * in process context.
1559 *
1560 * We special-case compound pages here: normally this means reads into hugetlb
1561 * pages.  The logic in here doesn't really work right for compound pages
1562 * because the VM does not uniformly chase down the head page in all cases.
1563 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1564 * handle them at all.  So we skip compound pages here at an early stage.
1565 *
1566 * Note that this code is very hard to test under normal circumstances because
1567 * direct-io pins the pages with get_user_pages().  This makes
1568 * is_page_cache_freeable return false, and the VM will not clean the pages.
1569 * But other code (eg, flusher threads) could clean the pages if they are mapped
1570 * pagecache.
1571 *
1572 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1573 * deferred bio dirtying paths.
1574 */
1575
1576/*
1577 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1578 */
1579void bio_set_pages_dirty(struct bio *bio)
1580{
1581        struct bio_vec *bvec;
1582        int i;
1583
1584        bio_for_each_segment_all(bvec, bio, i) {
1585                struct page *page = bvec->bv_page;
1586
1587                if (page && !PageCompound(page))
1588                        set_page_dirty_lock(page);
1589        }
1590}
1591
1592static void bio_release_pages(struct bio *bio)
1593{
1594        struct bio_vec *bvec;
1595        int i;
1596
1597        bio_for_each_segment_all(bvec, bio, i) {
1598                struct page *page = bvec->bv_page;
1599
1600                if (page)
1601                        put_page(page);
1602        }
1603}
1604
1605/*
1606 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1607 * If they are, then fine.  If, however, some pages are clean then they must
1608 * have been written out during the direct-IO read.  So we take another ref on
1609 * the BIO and the offending pages and re-dirty the pages in process context.
1610 *
1611 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1612 * here on.  It will run one put_page() against each page and will run one
1613 * bio_put() against the BIO.
1614 */
1615
1616static void bio_dirty_fn(struct work_struct *work);
1617
1618static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1619static DEFINE_SPINLOCK(bio_dirty_lock);
1620static struct bio *bio_dirty_list;
1621
1622/*
1623 * This runs in process context
1624 */
1625static void bio_dirty_fn(struct work_struct *work)
1626{
1627        unsigned long flags;
1628        struct bio *bio;
1629
1630        spin_lock_irqsave(&bio_dirty_lock, flags);
1631        bio = bio_dirty_list;
1632        bio_dirty_list = NULL;
1633        spin_unlock_irqrestore(&bio_dirty_lock, flags);
1634
1635        while (bio) {
1636                struct bio *next = bio->bi_private;
1637
1638                bio_set_pages_dirty(bio);
1639                bio_release_pages(bio);
1640                bio_put(bio);
1641                bio = next;
1642        }
1643}
1644
1645void bio_check_pages_dirty(struct bio *bio)
1646{
1647        struct bio_vec *bvec;
1648        int nr_clean_pages = 0;
1649        int i;
1650
1651        bio_for_each_segment_all(bvec, bio, i) {
1652                struct page *page = bvec->bv_page;
1653
1654                if (PageDirty(page) || PageCompound(page)) {
1655                        put_page(page);
1656                        bvec->bv_page = NULL;
1657                } else {
1658                        nr_clean_pages++;
1659                }
1660        }
1661
1662        if (nr_clean_pages) {
1663                unsigned long flags;
1664
1665                spin_lock_irqsave(&bio_dirty_lock, flags);
1666                bio->bi_private = bio_dirty_list;
1667                bio_dirty_list = bio;
1668                spin_unlock_irqrestore(&bio_dirty_lock, flags);
1669                schedule_work(&bio_dirty_work);
1670        } else {
1671                bio_put(bio);
1672        }
1673}
1674
1675void generic_start_io_acct(int rw, unsigned long sectors,
1676                           struct hd_struct *part)
1677{
1678        int cpu = part_stat_lock();
1679
1680        part_round_stats(cpu, part);
1681        part_stat_inc(cpu, part, ios[rw]);
1682        part_stat_add(cpu, part, sectors[rw], sectors);
1683        part_inc_in_flight(part, rw);
1684
1685        part_stat_unlock();
1686}
1687EXPORT_SYMBOL(generic_start_io_acct);
1688
1689void generic_end_io_acct(int rw, struct hd_struct *part,
1690                         unsigned long start_time)
1691{
1692        unsigned long duration = jiffies - start_time;
1693        int cpu = part_stat_lock();
1694
1695        part_stat_add(cpu, part, ticks[rw], duration);
1696        part_round_stats(cpu, part);
1697        part_dec_in_flight(part, rw);
1698
1699        part_stat_unlock();
1700}
1701EXPORT_SYMBOL(generic_end_io_acct);
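
/*
 * A sketch of how a bio-based driver might use the two accounting helpers
 * above; "my_disk" and the transfer step are illustrative, not kernel APIs:
 *
 *	static blk_qc_t my_make_request(struct request_queue *q,
 *					struct bio *bio)
 *	{
 *		unsigned long start = jiffies;
 *		int rw = bio_data_dir(bio);
 *
 *		generic_start_io_acct(rw, bio_sectors(bio), &my_disk->part0);
 *		// ... carry out the transfer ...
 *		generic_end_io_acct(rw, &my_disk->part0, start);
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 */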
1702
1703#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1704void bio_flush_dcache_pages(struct bio *bi)
1705{
1706        struct bio_vec bvec;
1707        struct bvec_iter iter;
1708
1709        bio_for_each_segment(bvec, bi, iter)
1710                flush_dcache_page(bvec.bv_page);
1711}
1712EXPORT_SYMBOL(bio_flush_dcache_pages);
1713#endif
1714
1715static inline bool bio_remaining_done(struct bio *bio)
1716{
1717        /*
1718         * If we're not chaining, then ->__bi_remaining is always 1 and
1719         * we always end io on the first invocation.
1720         */
1721        if (!bio_flagged(bio, BIO_CHAIN))
1722                return true;
1723
1724        BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1725
1726        if (atomic_dec_and_test(&bio->__bi_remaining)) {
1727                bio_clear_flag(bio, BIO_CHAIN);
1728                return true;
1729        }
1730
1731        return false;
1732}
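
/*
 * The counting above is what makes bio_chain() work: each chained child
 * holds one unit of the parent's __bi_remaining, so the parent's
 * completion runs exactly once, after the last piece finishes.  A sketch
 * (the fill-and-submit details are illustrative):
 *
 *	struct bio *child = bio_alloc(GFP_NOIO, nr_pages);
 *
 *	bio_chain(child, parent);	// parent now also waits for child
 *	// fill in child; on its completion bio_chain_endio() propagates
 *	// any error and calls bio_endio(parent)
 *	submit_bio(child);
 */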
1733
1734/**
1735 * bio_endio - end I/O on a bio
1736 * @bio:        bio
1737 *
1738 * Description:
1739 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1740 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1741 *   bio unless they own it and thus know that it has an end_io function.
1742 **/
1743void bio_endio(struct bio *bio)
1744{
1745again:
1746        if (!bio_remaining_done(bio))
1747                return;
1748
1749        /*
1750         * Need to have a real endio function for chained bios, otherwise
1751         * various corner cases will break (like stacking block devices that
1752         * save/restore bi_end_io) - however, we want to avoid unbounded
1753         * recursion and blowing the stack. Tail call optimization would
1754         * handle this, but compiling with frame pointers also disables
1755         * gcc's sibling call optimization.
1756         */
1757        if (bio->bi_end_io == bio_chain_endio) {
1758                bio = __bio_chain_endio(bio);
1759                goto again;
1760        }
1761
1762        if (bio->bi_end_io)
1763                bio->bi_end_io(bio);
1764}
1765EXPORT_SYMBOL(bio_endio);
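
/*
 * A typical completion site, assuming this kernel's bi_error field is the
 * way status is reported (the surrounding driver code is illustrative):
 *
 *	static void my_complete(struct bio *bio, int err)
 *	{
 *		bio->bi_error = err;	// 0 or a negative errno
 *		bio_endio(bio);		// invokes ->bi_end_io exactly once
 *	}
 */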
1766
1767/**
1768 * bio_split - split a bio
1769 * @bio:        bio to split
1770 * @sectors:    number of sectors to split from the front of @bio
1771 * @gfp:        gfp mask
1772 * @bs:         bio set to allocate from
1773 *
1774 * Allocates and returns a new bio which represents @sectors from the start of
1775 * @bio, and updates @bio to represent the remaining sectors.
1776 *
1777 * Unless this is a discard request the newly allocated bio will point
1778 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1779 * @bio is not freed before the split.
1780 */
1781struct bio *bio_split(struct bio *bio, int sectors,
1782                      gfp_t gfp, struct bio_set *bs)
1783{
1784        struct bio *split = NULL;
1785
1786        BUG_ON(sectors <= 0);
1787        BUG_ON(sectors >= bio_sectors(bio));
1788
1789        /*
1790         * Discards need a mutable bio_vec to accommodate the payload
1791         * required by the DSM TRIM and UNMAP commands.
1792         */
1793        if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
1794                split = bio_clone_bioset(bio, gfp, bs);
1795        else
1796                split = bio_clone_fast(bio, gfp, bs);
1797
1798        if (!split)
1799                return NULL;
1800
1801        split->bi_iter.bi_size = sectors << 9;
1802
1803        if (bio_integrity(split))
1804                bio_integrity_trim(split, 0, sectors);
1805
1806        bio_advance(bio, split->bi_iter.bi_size);
1807
1808        return split;
1809}
1810EXPORT_SYMBOL(bio_split);
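
/*
 * The usual pattern is a sketch like the following, where "max_sectors"
 * and "bs" stand for whatever limit and bio_set the caller has: carve
 * fixed-size pieces off the front and chain each one to the remainder,
 * so completion and errors aggregate on the original bio:
 *
 *	while (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors,
 *					      GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);	// remainder waits for split
 *		generic_make_request(split);
 *	}
 *	generic_make_request(bio);
 */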
1811
1812/**
1813 * bio_trim - trim a bio
1814 * @bio:        bio to trim
1815 * @offset:     number of sectors to trim from the front of @bio
1816 * @size:       size we want to trim @bio to, in sectors
1817 */
1818void bio_trim(struct bio *bio, int offset, int size)
1819{
1820        /* 'bio' is a cloned bio which we need to trim to match
1821         * the given offset and size.
1822         */
1823
1824        size <<= 9;
1825        if (offset == 0 && size == bio->bi_iter.bi_size)
1826                return;
1827
1828        bio_clear_flag(bio, BIO_SEG_VALID);
1829
1830        bio_advance(bio, offset << 9);
1831
1832        bio->bi_iter.bi_size = size;
1833}
1834EXPORT_SYMBOL_GPL(bio_trim);
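
/*
 * For example (a sketch; the source bio and the numbers are arbitrary),
 * a stacking driver that wants only part of a cloned bio might do:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	// keep 8 sectors starting 4 sectors into the original range
 *	bio_trim(clone, 4, 8);
 */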
1835
1836/*
 1837 * Create memory pools for biovecs in a bio_set.
 1838 * Use the global biovec slabs created for general use.
1839 */
1840mempool_t *biovec_create_pool(int pool_entries)
1841{
1842        struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1843
1844        return mempool_create_slab_pool(pool_entries, bp->slab);
1845}
1846
1847void bioset_free(struct bio_set *bs)
1848{
1849        if (bs->rescue_workqueue)
1850                destroy_workqueue(bs->rescue_workqueue);
1851
1852        if (bs->bio_pool)
1853                mempool_destroy(bs->bio_pool);
1854
1855        if (bs->bvec_pool)
1856                mempool_destroy(bs->bvec_pool);
1857
1858        bioset_integrity_free(bs);
1859        bio_put_slab(bs);
1860
1861        kfree(bs);
1862}
1863EXPORT_SYMBOL(bioset_free);
1864
1865static struct bio_set *__bioset_create(unsigned int pool_size,
1866                                       unsigned int front_pad,
1867                                       bool create_bvec_pool)
1868{
1869        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1870        struct bio_set *bs;
1871
1872        bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1873        if (!bs)
1874                return NULL;
1875
1876        bs->front_pad = front_pad;
1877
1878        spin_lock_init(&bs->rescue_lock);
1879        bio_list_init(&bs->rescue_list);
1880        INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1881
1882        bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1883        if (!bs->bio_slab) {
1884                kfree(bs);
1885                return NULL;
1886        }
1887
1888        bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1889        if (!bs->bio_pool)
1890                goto bad;
1891
1892        if (create_bvec_pool) {
1893                bs->bvec_pool = biovec_create_pool(pool_size);
1894                if (!bs->bvec_pool)
1895                        goto bad;
1896        }
1897
1898        bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1899        if (!bs->rescue_workqueue)
1900                goto bad;
1901
1902        return bs;
1903bad:
1904        bioset_free(bs);
1905        return NULL;
1906}
1907
1908/**
1909 * bioset_create  - Create a bio_set
1910 * @pool_size:  Number of bio and bio_vecs to cache in the mempool
1911 * @front_pad:  Number of bytes to allocate in front of the returned bio
1912 *
1913 * Description:
1914 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1915 *    to ask for a number of bytes to be allocated in front of the bio.
1916 *    Front pad allocation is useful for embedding the bio inside
1917 *    another structure, to avoid allocating extra data to go with the bio.
 1918 *    Note that the bio must always be embedded at the END of that structure,
1919 *    or things will break badly.
1920 */
1921struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1922{
1923        return __bioset_create(pool_size, front_pad, true);
1924}
1925EXPORT_SYMBOL(bioset_create);
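
/*
 * A sketch of the front-pad idiom ("struct my_io" and its members are
 * illustrative): embed the bio at the end of a private structure, tell
 * bioset_create() how many bytes precede it, and map back with
 * container_of() after allocation:
 *
 *	struct my_io {
 *		struct my_dev	*dev;	// driver-private context
 *		struct bio	bio;	// must be the last member
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */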
1926
1927/**
1928 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
1929 * @pool_size:  Number of bio to cache in the mempool
1930 * @front_pad:  Number of bytes to allocate in front of the returned bio
1931 *
1932 * Description:
 1933 *    Same functionality as bioset_create() except that no mempool is
 1934 *    created for bio_vecs, saving some memory for bio_clone_fast() users.
1935 */
1936struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
1937{
1938        return __bioset_create(pool_size, front_pad, false);
1939}
1940EXPORT_SYMBOL(bioset_create_nobvec);
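
/*
 * For example (a sketch; "my_clone_endio" is an illustrative callback):
 * a stacking driver that only ever clones with bio_clone_fast() shares
 * the source bio's bvec array, so its bio_set needs no bvec pool:
 *
 *	clone_set = bioset_create_nobvec(BIO_POOL_SIZE, 0);
 *	...
 *	clone = bio_clone_fast(bio, GFP_NOIO, clone_set);
 *	clone->bi_end_io = my_clone_endio;
 *	generic_make_request(clone);
 */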
1941
1942#ifdef CONFIG_BLK_CGROUP
1943
1944/**
1945 * bio_associate_blkcg - associate a bio with the specified blkcg
1946 * @bio: target bio
1947 * @blkcg_css: css of the blkcg to associate
1948 *
1949 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
1950 * treat @bio as if it were issued by a task which belongs to the blkcg.
1951 *
1952 * This function takes an extra reference of @blkcg_css which will be put
1953 * when @bio is released.  The caller must own @bio and is responsible for
1954 * synchronizing calls to this function.
1955 */
1956int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1957{
1958        if (unlikely(bio->bi_css))
1959                return -EBUSY;
1960        css_get(blkcg_css);
1961        bio->bi_css = blkcg_css;
1962        return 0;
1963}
1964EXPORT_SYMBOL_GPL(bio_associate_blkcg);
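
/*
 * A sketch, assuming the caller already holds a reference on the css it
 * looked up elsewhere (the lookup itself is outside this function):
 *
 *	bio_associate_blkcg(bio, blkcg_css);	// takes its own css ref
 *	submit_bio(bio);		// IO is accounted to that blkcg
 *	// the association's css reference is dropped when bio is released
 */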
1965
1966/**
1967 * bio_associate_current - associate a bio with %current
1968 * @bio: target bio
1969 *
1970 * Associate @bio with %current if it hasn't been associated yet.  Block
1971 * layer will treat @bio as if it were issued by %current no matter which
1972 * task actually issues it.
1973 *
1974 * This function takes an extra reference of @task's io_context and blkcg
1975 * which will be put when @bio is released.  The caller must own @bio,
1976 * ensure %current->io_context exists, and is responsible for synchronizing
1977 * calls to this function.
1978 */
1979int bio_associate_current(struct bio *bio)
1980{
1981        struct io_context *ioc;
1982
1983        if (bio->bi_css)
1984                return -EBUSY;
1985
1986        ioc = current->io_context;
1987        if (!ioc)
1988                return -ENOENT;
1989
1990        get_io_context_active(ioc);
1991        bio->bi_ioc = ioc;
1992        bio->bi_css = task_get_css(current, io_cgrp_id);
1993        return 0;
1994}
1995EXPORT_SYMBOL_GPL(bio_associate_current);
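
/*
 * A sketch of the intended use ("my_wq" and "my_work" are illustrative):
 * capture the submitting task's identity before punting the bio to a
 * helper thread, so the block layer still charges the IO to the
 * submitter:
 *
 *	bio_associate_current(bio);	// needs current->io_context
 *	queue_work(my_wq, &my_work);	// the worker submits bio later
 */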
1996
1997/**
1998 * bio_disassociate_task - undo bio_associate_current()
1999 * @bio: target bio
2000 */
2001void bio_disassociate_task(struct bio *bio)
2002{
2003        if (bio->bi_ioc) {
2004                put_io_context(bio->bi_ioc);
2005                bio->bi_ioc = NULL;
2006        }
2007        if (bio->bi_css) {
2008                css_put(bio->bi_css);
2009                bio->bi_css = NULL;
2010        }
2011}
2012
2013/**
2014 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
2015 * @dst: destination bio
2016 * @src: source bio
2017 */
2018void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
2019{
2020        if (src->bi_css)
2021                WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2022}
2023
2024#endif /* CONFIG_BLK_CGROUP */
2025
2026static void __init biovec_init_slabs(void)
2027{
2028        int i;
2029
2030        for (i = 0; i < BVEC_POOL_NR; i++) {
2031                int size;
2032                struct biovec_slab *bvs = bvec_slabs + i;
2033
2034                if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2035                        bvs->slab = NULL;
2036                        continue;
2037                }
2038
2039                size = bvs->nr_vecs * sizeof(struct bio_vec);
2040                bvs->slab = kmem_cache_create(bvs->name, size, 0,
2041                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2042        }
2043}
2044
2045static int __init init_bio(void)
2046{
2047        bio_slab_max = 2;
2048        bio_slab_nr = 0;
2049        bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2050        if (!bio_slabs)
2051                panic("bio: can't allocate bios\n");
2052
2053        bio_integrity_init();
2054        biovec_init_slabs();
2055
2056        fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2057        if (!fs_bio_set)
2058                panic("bio: can't allocate bios\n");
2059
2060        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2061                panic("bio: can't create integrity pool\n");
2062
2063        return 0;
2064}
2065subsys_initcall(init_bio);
2066