linux/include/linux/bio.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_VECS            256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
        return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)                   (bio)->bi_ioprio
#define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)                                \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)                                 \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)                              \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)     bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_SAME ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_opf & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:        bio to check
 * @len:        length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio; otherwise return false.
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return true;

        if (bio->bi_iter.bi_size > UINT_MAX - len)
                return true;

        return false;
}

static inline bool bio_next_segment(const struct bio *bio,
                                    struct bvec_iter_all *iter)
{
        if (iter->idx >= bio->bi_vcnt)
                return false;

        bvec_advance(&bio->bi_io_vec[iter->idx], iter);
        return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
        for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

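/*
 * Illustrative sketch (not part of the original header): the usual legitimate
 * user of the "all" variant is the owner of the bio, e.g. a filesystem read
 * completion handler walking every page it added itself.  The handler name
 * my_read_endio() and the page-flag handling are hypothetical.
 *
 * static void my_read_endio(struct bio *bio)
 * {
 *         struct bio_vec *bvec;
 *         struct bvec_iter_all iter_all;
 *
 *         bio_for_each_segment_all(bvec, bio, iter_all) {
 *                 struct page *page = bvec->bv_page;
 *
 *                 if (bio->bi_status)
 *                         SetPageError(page);
 *                 else
 *                         SetPageUptodate(page);
 *                 unlock_page(page);
 *         }
 *         bio_put(bio);
 * }
 */
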
static inline void bio_advance_iter(const struct bio *bio,
                                    struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
                /* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
                                           struct bvec_iter *iter,
                                           unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

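/*
 * Illustrative sketch (not part of the original header): iterating over the
 * single-page segments of a bio with a private bvec_iter, leaving
 * bio->bi_iter untouched.  my_bio_csum() is hypothetical and assumes
 * <linux/highmem.h> and <linux/crc32c.h> are available.
 *
 * static u32 my_bio_csum(struct bio *bio)
 * {
 *         struct bio_vec bvec;
 *         struct bvec_iter iter;
 *         u32 csum = 0;
 *
 *         bio_for_each_segment(bvec, bio, iter) {
 *                 void *p = kmap_local_page(bvec.bv_page);
 *
 *                 csum = crc32c(csum, p + bvec.bv_offset, bvec.bv_len);
 *                 kunmap_local(p);
 *         }
 *         return csum;
 * }
 */
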
#define __bio_for_each_bvec(bvl, bio, iter, start)              \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)                       \
        __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)              \
        for (i = 0, bvl = bio_first_bvec_all(bio);      \
             i < (bio)->bi_vcnt; i++, bvl++)            \

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same/write zeroes, because they
         * interpret bi_size differently:
         */

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
        *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
        struct bvec_iter iter = bio->bi_iter;
        int idx;

        bio_get_first_bvec(bio, bv);
        if (bv->bv_len == bio->bi_iter.bi_size)
                return;         /* this bio only has a single bvec */

        bio_advance_iter(bio, &iter, iter.bi_size);

        if (!iter.bi_bvec_done)
                idx = iter.bi_idx - 1;
        else    /* in the middle of bvec */
                idx = iter.bi_idx;

        *bv = bio->bi_io_vec[idx];

        /*
         * iter.bi_bvec_done records the actual length of the last bvec
         * if this bio ends in the middle of one io vector
         */
        if (iter.bi_bvec_done)
                bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
        BIP_CTRL_NOCHECK        = 1 << 2, /* disable HBA integrity checking */
        BIP_DISK_NOCHECK        = 1 << 3, /* disable disk integrity checking */
        BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */

        struct bvec_iter        bip_iter;

        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
        unsigned short          bip_max_vcnt;   /* integrity bio_vec slots */
        unsigned short          bip_flags;      /* control flags */

        struct bvec_iter        bio_iter;       /* for rewinding parent bio */

        struct work_struct      bip_work;       /* I/O completion */

        struct bio_vec          *bip_vec;
        struct bio_vec          bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_opf & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is no
 * larger than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}

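/*
 * Illustrative sketch (not part of the original header): a stacking driver
 * carving an incoming bio into chunks of at most max_sectors and handling
 * each piece, in the style of bcache.  split_and_submit(), handle_chunk()
 * and max_sectors are hypothetical.
 *
 * static void split_and_submit(struct bio *bio, unsigned int max_sectors,
 *                              struct bio_set *bs)
 * {
 *         struct bio *split;
 *
 *         do {
 *                 split = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *                 handle_chunk(split);
 *         } while (split != bio);
 * }
 */
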
enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
        BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
                struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
                struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}

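/*
 * Illustrative sketch (not part of the original header): allocating a bio
 * from fs_bio_set, pointing it at a device and synchronously reading one
 * page.  read_one_page() and its arguments are hypothetical.
 *
 * static int read_one_page(struct block_device *bdev, sector_t sector,
 *                          struct page *page)
 * {
 *         struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *         int ret;
 *
 *         bio_set_dev(bio, bdev);
 *         bio->bi_iter.bi_sector = sector;
 *         bio->bi_opf = REQ_OP_READ;
 *         bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *         ret = submit_bio_wait(bio);
 *         bio_put(bio);
 *         return ret;
 * }
 */
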
extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio_set_flag(bio, BIO_QUIET);
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec, that bvec is going
 * to be reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
        if (iov_iter_is_bvec(iter))
                return 0;
        return iov_iter_npages(iter, max_segs);
}

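/*
 * Illustrative sketch (not part of the original header): sizing a direct-I/O
 * bio for an iov_iter.  For a bvec-backed iterator no vectors are allocated,
 * since bio_iov_iter_get_pages() can reuse the iterator's bvec array.
 * dio_alloc_bio() is a hypothetical helper; the caller would still set
 * bi_bdev, bi_opf and bi_sector before submission.
 *
 * static struct bio *dio_alloc_bio(struct iov_iter *iter)
 * {
 *         unsigned int nr_vecs = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
 *         struct bio *bio = bio_alloc(GFP_KERNEL, nr_vecs);
 *         int ret;
 *
 *         ret = bio_iov_iter_get_pages(bio, iter);
 *         if (ret) {
 *                 bio_put(bio);
 *                 return ERR_PTR(ret);
 *         }
 *         return bio;
 * }
 */
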
struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
                     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
                             unsigned int len, unsigned int offset);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)                          \
do {                                                    \
        bio_clear_flag(bio, BIO_REMAPPED);              \
        if ((bio)->bi_bdev != (bdev))                   \
                bio_clear_flag(bio, BIO_THROTTLED);     \
        (bio)->bi_bdev = (bdev);                        \
        bio_associate_blkg(bio);                        \
} while (0)

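/*
 * Illustrative sketch (not part of the original header): a simple remapping
 * driver redirecting a bio to an underlying device before resubmitting it.
 * my_remap_submit(), dev->lower_bdev and dev->start_sector are hypothetical.
 *
 * static void my_remap_submit(struct my_dev *dev, struct bio *bio)
 * {
 *         bio_set_dev(bio, dev->lower_bdev);
 *         bio->bi_iter.bi_sector += dev->start_sector;
 *         submit_bio_noacct(bio);
 * }
 */
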
#define bio_copy_dev(dst, src)                  \
do {                                            \
        bio_clear_flag(dst, BIO_REMAPPED);              \
        (dst)->bi_bdev = (src)->bi_bdev;        \
        bio_clone_blkg_association(dst, src);   \
} while (0)

#define bio_dev(bio) \
        disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else   /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
#endif  /* CONFIG_BLK_CGROUP */

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST  { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}

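/*
 * Illustrative sketch (not part of the original header): collecting deferred
 * bios on a bio_list and draining them later, e.g. from a worker, as DM and
 * loop do.  The my_defer()/my_drain() helpers, struct my_dev and its locking
 * are hypothetical.
 *
 * static void my_defer(struct my_dev *dev, struct bio *bio)
 * {
 *         spin_lock_irq(&dev->lock);
 *         bio_list_add(&dev->deferred, bio);
 *         spin_unlock_irq(&dev->lock);
 * }
 *
 * static void my_drain(struct my_dev *dev)
 * {
 *         struct bio_list list;
 *         struct bio *bio;
 *
 *         bio_list_init(&list);
 *         spin_lock_irq(&dev->lock);
 *         bio_list_merge(&list, &dev->deferred);
 *         bio_list_init(&dev->deferred);
 *         spin_unlock_irq(&dev->lock);
 *
 *         while ((bio = bio_list_pop(&list)))
 *                 submit_bio_noacct(bio);
 * }
 */
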
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        /*
         * per-cpu bio alloc cache
         */
        struct bio_alloc_cache __percpu *cache;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        unsigned int back_pad;
        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t              rescue_lock;
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;

        /*
         * Hot un-plug notifier for the per-cpu cache, if used
         */
        struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}

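/*
 * Illustrative sketch (not part of the original header): a driver keeping a
 * private bio_set so its clones do not depend on fs_bio_set.  my_bio_set and
 * the init/exit hooks are hypothetical.
 *
 * static struct bio_set my_bio_set;
 *
 * static int __init my_driver_init(void)
 * {
 *         return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *                            BIOSET_NEED_BVECS);
 * }
 *
 * static void __exit my_driver_exit(void)
 * {
 *         bioset_exit(&my_bio_set);
 * }
 *
 * In the I/O path a clone can then be allocated from the private set:
 *
 *         clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 */
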
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)                                \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
        for_each_bio(_bio)                                              \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free (struct bio_set *bs)
{
        return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
        return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
                                                                unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                        unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_HIPRI;
        if (!is_sync_kiocb(kiocb))
                bio->bi_opf |= REQ_NOWAIT;
}

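/*
 * Illustrative sketch (not part of the original header): a direct-I/O
 * submission path marking its bio as polled when the kiocb requested
 * high-priority (polled) completion.  my_dio_submit() is hypothetical.
 *
 * static void my_dio_submit(struct kiocb *iocb, struct bio *bio)
 * {
 *         if (iocb->ki_flags & IOCB_HIPRI)
 *                 bio_set_polled(bio, iocb);
 *         submit_bio(bio);
 * }
 */
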
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#endif /* __LINUX_BIO_H */