/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES           256
#define BIO_MAX_SIZE            (BIO_MAX_PAGES << PAGE_SHIFT)
#define BIO_MAX_SECTORS         (BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT  (8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)   ((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)     ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio) do {                                    \
        WARN_ON(prio >= (1 << IOPRIO_BITS));                            \
        (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);                  \
        (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);     \
} while (0)
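
/*
 * Illustrative sketch (not part of the original header): tagging a bio
 * with a best-effort priority before submission. IOPRIO_PRIO_VALUE() and
 * IOPRIO_CLASS_BE come from linux/ioprio.h; 'bio' is a hypothetical
 * caller-owned bio.
 *
 *      if (!bio_prio_valid(bio))
 *              bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 */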

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define __bvec_iter_bvec(bvec, iter)    (&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter)                              \
        (__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter)                               \
        min((iter).bi_size,                                     \
            __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter)                            \
        (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter)                              \
((struct bio_vec) {                                             \
        .bv_page        = bvec_iter_page((bvec), (iter)),       \
        .bv_len         = bvec_iter_len((bvec), (iter)),        \
        .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
})

#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)                                \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)                                 \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)                              \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)                              \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)        ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)     ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
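
/*
 * Worked example: with 512-byte sectors, a bio whose bi_iter.bi_size is
 * 8192 bytes spans 8192 >> 9 == 16 sectors, so for bi_iter.bi_sector ==
 * 1024, bio_end_sector() evaluates to 1040.
 */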

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            !(bio->bi_rw & REQ_DISCARD))
                return true;

        return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
        if (!bio_has_data(bio))
                return false;

        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
                return false;

        return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}
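
/*
 * Example (sketch): bio_data() assumes the payload lives in lowmem, so a
 * caller might peek at the buffer like this ('bio' is a hypothetical
 * caller-owned bio):
 *
 *      char *buf = bio_data(bio);
 *
 *      if (buf)
 *              pr_debug("first byte: %02x\n", buf[0]);
 */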

/*
 * will die
 */
#define bio_to_phys(bio)        (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)        (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)                            \
        (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
         bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)       kunmap_atomic(addr)
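
/*
 * Example (sketch): copying the start of the current segment out of a
 * possibly-highmem page; 'dst' is a hypothetical lowmem buffer. The
 * mapping must be released before sleeping.
 *
 *      char *src = __bio_kmap_atomic(bio, bio->bi_iter);
 *
 *      memcpy(dst, src, bio_iter_len(bio, bio->bi_iter));
 *      __bio_kunmap_atomic(src);
 */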

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)     \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, per queue merge helper
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)       \
        __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)                           \
        for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
                                     unsigned bytes)
{
        WARN_ONCE(bytes > iter->bi_size,
                  "Attempted to advance past end of bvec iter\n");

        while (bytes) {
                unsigned len = min(bytes, bvec_iter_len(bv, *iter));

                bytes -= len;
                iter->bi_size -= len;
                iter->bi_bvec_done += len;

                if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
                        iter->bi_bvec_done = 0;
                        iter->bi_idx++;
                }
        }
}

#define for_each_bvec(bvl, bio_vec, iter, start)                        \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);         \
             bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
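
/*
 * Example (sketch): totalling the bytes described by a bare bvec array;
 * 'bvecs' and 'start' are hypothetical.
 *
 *      struct bio_vec bv;
 *      struct bvec_iter iter;
 *      unsigned bytes = 0;
 *
 *      for_each_bvec(bv, bvecs, iter, start)
 *              bytes += bv.bv_len;
 */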

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
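
/*
 * Example (sketch): for a regular read/write bio, walking the segments
 * visits exactly bi_iter.bi_size bytes:
 *
 *      struct bio_vec bvec;
 *      struct bvec_iter iter;
 *      unsigned bytes = 0;
 *
 *      bio_for_each_segment(bvec, bio, iter)
 *              bytes += bvec.bv_len;
 *
 *      WARN_ON(bytes != bio->bi_iter.bi_size);
 */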

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same, because they interpret bi_size
         * differently:
         */

        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
 * runs
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb__before_atomic();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}
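
/*
 * Example (sketch): the flag helpers take bit numbers (the BIO_* values
 * from blk_types.h), e.g. suppressing error reporting for a probe bio
 * that is expected to fail:
 *
 *      bio_set_flag(bio, BIO_QUIET);
 *      if (bio_flagged(bio, BIO_QUIET))
 *              bio_clear_flag(bio, BIO_QUIET);
 */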

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
        *bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
        struct bvec_iter iter = bio->bi_iter;
        int idx;

        if (unlikely(!bio_multiple_segments(bio))) {
                *bv = bio_iovec(bio);
                return;
        }

        bio_advance_iter(bio, &iter, iter.bi_size);

        if (!iter.bi_bvec_done)
                idx = iter.bi_idx - 1;
        else    /* in the middle of bvec */
                idx = iter.bi_idx;

        *bv = bio->bi_io_vec[idx];

        /*
         * iter.bi_bvec_done records actual length of the last bvec
         * if this bio ends in the middle of one io vector
         */
        if (iter.bi_bvec_done)
                bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
        BIP_BLOCK_INTEGRITY     = 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY    = 1 << 1, /* ref tag has been remapped */
        BIP_CTRL_NOCHECK        = 1 << 2, /* disable HBA integrity checking */
        BIP_DISK_NOCHECK        = 1 << 3, /* disable disk integrity checking */
        BIP_IP_CHECKSUM         = 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */

        struct bvec_iter        bip_iter;

        bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */

        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
        unsigned short          bip_max_vcnt;   /* integrity bio_vec slots */
        unsigned short          bip_flags;      /* control flags */

        struct work_struct      bip_work;       /* I/O completion */

        struct bio_vec          *bip_vec;
        struct bio_vec          bip_inline_vecs[0]; /* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_rw & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}
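
/*
 * Example (sketch, loosely modelled on stacking-driver usage): carving a
 * bio into chunks of at most 'max_sectors', chaining each piece back to
 * the parent. 'max_sectors' and 'split_bioset' are hypothetical;
 * generic_make_request() is declared in linux/blkdev.h.
 *
 *      struct bio *split;
 *
 *      do {
 *              split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *                                     split_bioset);
 *              if (split != bio)
 *                      bio_chain(split, bio);
 *              generic_make_request(split);
 *      } while (split != bio);
 */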

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
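
/*
 * Example (sketch): synchronous read of one page. 'bdev', 'page' and
 * 'sector' are hypothetical; error handling for bio_add_page() is elided.
 *
 *      struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *      bio->bi_bdev = bdev;
 *      bio->bi_iter.bi_sector = sector;
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *      if (submit_bio_wait(READ, bio))
 *              pr_err("page read failed\n");
 *      bio_put(bio);
 */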

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_error = -EIO;
        bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

void generic_start_io_acct(int rw, unsigned long sectors,
                           struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
                         unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *,
                                     const struct iov_iter *,
                                     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else   /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
                        struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif  /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)    bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)      __bio_kunmap_irq(buf, flags)
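
/*
 * Example (sketch): zeroing the first segment of a bio under the IRQ-safe
 * mapping; interrupts stay disabled between map and unmap.
 *
 *      unsigned long flags;
 *      char *buf = bio_kmap_irq(bio, &flags);
 *
 *      memset(buf, 0, bio_cur_bytes(bio));
 *      bio_kunmap_irq(buf, &flags);
 */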

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST  { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
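
/*
 * Example (sketch): a remapping driver deferring bios to a local list and
 * draining it later. 'some_bio' is hypothetical; generic_make_request()
 * is declared in linux/blkdev.h.
 *
 *      struct bio_list defer;
 *      struct bio *bio;
 *
 *      bio_list_init(&defer);
 *      bio_list_add(&defer, some_bio);
 *
 *      while ((bio = bio_list_pop(&defer)))
 *              generic_make_request(bio);
 */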

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX  (BIOVEC_NR_POOLS - 1)

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t *bio_pool;
        mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
        mempool_t *bvec_integrity_pool;
#endif

        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t              rescue_lock;
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;
};
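
/*
 * Example (sketch): a driver keeping a private bio_set so its allocations
 * can make forward progress independently of the global fs_bio_set:
 *
 *      struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *
 *      if (bs) {
 *              struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *
 *              ...
 *              bioset_free(bs);
 *      }
 */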

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)                                \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
        for_each_bio(_bio)                                              \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
        return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
        return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
        return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
                                      unsigned int sectors)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                        unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                         unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */