linux/include/linux/bio.h
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#ifdef CONFIG_THP_SWAP
#if HPAGE_PMD_NR > 256
#define BIO_MAX_PAGES           HPAGE_PMD_NR
#else
#define BIO_MAX_PAGES           256
#endif
#else
#define BIO_MAX_PAGES           256
#endif

#define bio_prio(bio)                   (bio)->bi_ioprio
#define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)                                \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)                                 \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)                              \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)                              \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)     bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)
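
/*
 * A minimal usage sketch (not part of this header; bio is an assumed
 * caller-provided bio). bi_size is carried in bytes, so the helpers
 * above shift by 9 to count 512-byte sectors regardless of the queue's
 * logical block size:
 *
 *      sector_t first = bio->bi_iter.bi_sector;
 *      sector_t past_end = bio_end_sector(bio);
 *
 *      pr_debug("%s of %u sectors [%llu, %llu)\n",
 *               bio_data_dir(bio) == WRITE ? "write" : "read",
 *               bio_sectors(bio),
 *               (unsigned long long)first,
 *               (unsigned long long)past_end);
 */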

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_SAME ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_opf & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

static inline bool bio_full(struct bio *bio)
{
        return bio->bi_vcnt >= bio->bi_max_vecs;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)                           \
        for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(const struct bio *bio,
                                    struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
                /* TODO: It is reasonable to complete bio with error here. */
}

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
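
/*
 * A hedged iteration sketch (not part of this header). The iterator is a
 * private copy of bi_iter, so walking it leaves the bio itself untouched;
 * here it sums the payload of a data-carrying bio:
 *
 *      struct bio_vec bvec;
 *      struct bvec_iter iter;
 *      unsigned int bytes = 0;
 *
 *      bio_for_each_segment(bvec, bio, iter)
 *              bytes += bvec.bv_len;
 *
 *      BIO_BUG_ON(bytes != bio->bi_iter.bi_size);
 */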

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same/write zeroes, because they
         * interpret bi_size differently:
         */

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb__before_atomic();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
        *bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
        struct bvec_iter iter = bio->bi_iter;
        int idx;

        if (unlikely(!bio_multiple_segments(bio))) {
                *bv = bio_iovec(bio);
                return;
        }

        bio_advance_iter(bio, &iter, iter.bi_size);

        if (!iter.bi_bvec_done)
                idx = iter.bi_idx - 1;
        else    /* in the middle of bvec */
                idx = iter.bi_idx;

        *bv = bio->bi_io_vec[idx];

        /*
         * iter.bi_bvec_done records actual length of the last bvec
         * if this bio ends in the middle of one io vector
         */
        if (iter.bi_bvec_done)
                bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
        BIP_CTRL_NOCHECK        = 1 << 2, /* disable HBA integrity checking */
        BIP_DISK_NOCHECK        = 1 << 3, /* disable disk integrity checking */
        BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */

        struct bvec_iter        bip_iter;

        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
        unsigned short          bip_max_vcnt;   /* integrity bio_vec slots */
        unsigned short          bip_flags;      /* control flags */

        struct bvec_iter        bio_iter;       /* for rewinding parent bio */

        struct work_struct      bip_work;       /* I/O completion */

        struct bio_vec          *bip_vec;

        RH_KABI_RESERVE(1)
        RH_KABI_RESERVE(2)

        struct bio_vec          bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_opf & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}
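
/*
 * A hedged splitting sketch (not part of this header; max_sectors and
 * my_bio_set are assumed caller-provided names). Stacking drivers carve
 * an oversized bio into pieces, chaining each piece to the remainder so
 * completion is only signalled once every piece has finished:
 *
 *      struct bio *split;
 *
 *      do {
 *              split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *                                     &my_bio_set);
 *              if (split != bio) {
 *                      bio_chain(split, bio);
 *                      submit_bio(split);
 *              }
 *      } while (split != bio);
 *      submit_bio(bio);
 */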

enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
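
/*
 * A minimal setup sketch (not part of this header; my_bio_set and
 * MY_POOL_SIZE are assumed names). Drivers that clone or split bios keep
 * a private bio_set so their allocations cannot deadlock against other
 * users of the shared fs_bio_set:
 *
 *      static struct bio_set my_bio_set;
 *
 *      int ret = bioset_init(&my_bio_set, MY_POOL_SIZE, 0,
 *                            BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
 *      ...
 *      bioset_exit(&my_bio_set);
 */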

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
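
/*
 * A hedged end-to-end sketch (not part of this header; bdev, page and
 * sector are assumed caller-provided). Allocate a bio from fs_bio_set,
 * aim it at a device, attach one payload page and issue it synchronously:
 *
 *      struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *      int ret;
 *
 *      bio_set_dev(bio, bdev);
 *      bio->bi_iter.bi_sector = sector;
 *      bio->bi_opf = REQ_OP_READ;
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *      ret = submit_bio_wait(bio);
 *      bio_put(bio);
 */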

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio_set_flag(bio, BIO_QUIET);
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
                     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
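
/*
 * A hedged chaining sketch (not part of this header; more_work and
 * nr_vecs are assumed caller-provided). bio_chain() makes the second bio
 * the parent of the first, so a series of pieces can be issued while the
 * final submit only completes after every earlier piece:
 *
 *      while (more_work) {
 *              struct bio *next = bio_alloc(GFP_NOIO, nr_vecs);
 *
 *              bio_chain(bio, next);
 *              submit_bio(bio);
 *              bio = next;
 *              ...
 *      }
 *      submit_bio(bio);
 */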

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
void __bio_add_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(struct request_queue *q, int op,
                                unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int op,
                                struct hd_struct *part,
                                unsigned long start_time);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *,
                                     struct iov_iter *,
                                     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
        zero_fill_bio_iter(bio, bio->bi_iter);
}

extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)                  \
do {                                            \
        if ((bio)->bi_disk != (bdev)->bd_disk)  \
                bio_clear_flag(bio, BIO_THROTTLED);\
        (bio)->bi_disk = (bdev)->bd_disk;       \
        (bio)->bi_partno = (bdev)->bd_partno;   \
        bio_associate_blkg(bio);                \
} while (0)

#define bio_copy_dev(dst, src)                  \
do {                                            \
        (dst)->bi_disk = (src)->bi_disk;        \
        (dst)->bi_partno = (src)->bi_partno;    \
        bio_clone_blkg_association(dst, src);   \
} while (0)

#define bio_dev(bio) \
        disk_devt((bio)->bi_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else   /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
#endif  /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif
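
/*
 * A hedged mapping sketch (not part of this header; bvec and buf are
 * assumed caller-provided). Per the warning above, interrupts must stay
 * disabled between the map and unmap calls:
 *
 *      unsigned long flags;
 *      char *data = bvec_kmap_irq(bvec, &flags);
 *
 *      memcpy(buf, data, bvec->bv_len);
 *      bvec_kunmap_irq(data, &flags);
 */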

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST  { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
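
/*
 * A hedged queueing sketch (not part of this header; bio1 and bio2 are
 * assumed caller-provided). Remapping drivers defer bios on a bio_list
 * and later drain it in FIFO order:
 *
 *      struct bio_list deferred;
 *      struct bio *bio;
 *
 *      bio_list_init(&deferred);
 *      bio_list_add(&deferred, bio1);
 *      bio_list_add(&deferred, bio2);
 *
 *      while ((bio = bio_list_pop(&deferred)))
 *              submit_bio(bio);
 */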

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t              rescue_lock;
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;

        RH_KABI_RESERVE(1)
        RH_KABI_RESERVE(2)
        RH_KABI_RESERVE(3)
        RH_KABI_RESERVE(4)
};

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)                                \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
        for_each_bio(_bio)                                              \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
        return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                                                unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                        unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_HIPRI;
        if (!is_sync_kiocb(kiocb))
                bio->bi_opf |= REQ_NOWAIT;
}

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */