#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES 256
#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)

/*
 * The upper 16 bits of bi_rw define the I/O priority of this bio.
 */
#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio) do { \
        WARN_ON(prio >= (1 << IOPRIO_BITS)); \
        (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
        (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
} while (0)
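
/*
 * Illustrative use (not part of the original header): tag a freshly
 * built bio with a best-effort priority before submission. The ioprio
 * value is only an example; any valid ioprio works.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
 *	BIO_BUG_ON(!bio_prio_valid(bio));
 */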

/*
 * Various member accessors; note that bio_data() should of course not
 * be used on highmem page vectors.
 */
#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter) \
        (__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter) \
        min((iter).bi_size, \
            __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter) \
        (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
        .bv_page = bvec_iter_page((bvec), (iter)), \
        .bv_len = bvec_iter_len((bvec), (iter)), \
        .bv_offset = bvec_iter_offset((bvec), (iter)), \
})

#define bio_iter_iovec(bio, iter) \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter) \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter) \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter) \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio) \
        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            !(bio->bi_rw & REQ_DISCARD))
                return true;

        return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
        if (!bio_has_data(bio))
                return false;

        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
                return false;

        return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio_has_data(bio))
                return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
                return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}
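
/*
 * Illustrative use of bio_data() (assumes a lowmem, single-segment bio,
 * e.g. one built with bio_map_kern(); scratch is a caller buffer). Not
 * part of the original header:
 *
 *	char *p = bio_data(bio);
 *
 *	if (p)
 *		memcpy(scratch, p, bio_cur_bytes(bio));
 */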

#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * Queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fallback, the user is probably better off disabling
 * highmem I/O completely on that queue.
 */
#define __bio_kmap_atomic(bio, iter) \
        (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
         bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)

/*
 * Merge helpers: a bvec pair is physically mergeable when the second
 * vector starts exactly where the first one ends.
 */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * Architectures may provide their own BIOVEC_PHYS_MERGEABLE to add
 * platform-specific constraints.
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
        __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * Drivers should _never_ use the _all version - the bio may have been
 * split before it got to the driver, and the driver won't own all of it.
 */
#define bio_for_each_segment_all(bvl, bio, i) \
        for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
                                     unsigned bytes)
{
        WARN_ONCE(bytes > iter->bi_size,
                  "Attempted to advance past end of bvec iter\n");

        while (bytes) {
                unsigned len = min(bytes, bvec_iter_len(bv, *iter));

                bytes -= len;
                iter->bi_size -= len;
                iter->bi_bvec_done += len;

                /* step to the next bvec once the current one is consumed */
                if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
                        iter->bi_bvec_done = 0;
                        iter->bi_idx++;
                }
        }
}
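
/*
 * Worked example (illustrative): with two 512-byte bvecs and a fresh
 * iter (bi_idx = 0, bi_bvec_done = 0, bi_size = 1024), advancing by
 * 768 bytes consumes all of bvec 0 (bi_idx becomes 1) plus 256 bytes
 * of bvec 1, leaving bi_bvec_done = 256 and bi_size = 256.
 */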

#define for_each_bvec(bvl, bio_vec, iter, start) \
        for ((iter) = start; \
             (bvl) = bvec_iter_bvec((bio_vec), (iter)), \
             (iter).bi_size; \
             bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))

/*
 * Discard and write-same (the BIO_NO_ADVANCE_ITER_MASK flags) interpret
 * bi_size independently of the bvecs, so only the size is consumed;
 * everything else also advances the bvec iterator.
 */
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
                                    unsigned bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start) \
        for (iter = (start); \
             (iter).bi_size && \
                ((bvl = bio_iter_iovec((bio), (iter))), 1); \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter) \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
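
/*
 * Illustrative iteration (the typical consumer pattern; do_thing()
 * stands in for caller logic):
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		void *p = kmap_atomic(bvec.bv_page);
 *
 *		do_thing(p + bvec.bv_offset, bvec.bv_len);
 *		kunmap_atomic(p);
 *	}
 */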

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same, because they interpret
         * bi_size differently: they always count as a single segment,
         * regardless of the byte count.
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(rw, bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already be freed by the time the
 * bi_flags check runs.
 */
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio *bip_bio; /* parent bio */

        struct bvec_iter bip_iter;

        void *bip_buf; /* generated integrity data */

        bio_end_io_t *bip_end_io; /* saved I/O completion fn */

        unsigned short bip_slab; /* slab the bip came from */
        unsigned short bip_vcnt; /* # of integrity bio_vecs */
        unsigned bip_owns_buf:1; /* should free bip_buf */

        struct work_struct bip_work; /* I/O completion */

        struct bio_vec *bip_vec;
        struct bio_vec bip_inline_vecs[0]; /* embedded bvec array */
};
#endif

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is
 * less than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}
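
/*
 * Illustrative split loop (assumptions: the caller owns @bio and
 * created @split_bs; chunk_sectors is the caller's chunk size):
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, chunk_sectors, GFP_NOIO,
 *				       split_bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		generic_make_request(split);
 *	} while (split != bio);
 */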

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;
unsigned int bio_integrity_tag_size(struct bio *bio);

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, NULL);
}
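
/*
 * Illustrative allocation sketch (not from the original header): read
 * one page synchronously. @bdev, @sector and @page are assumed to be
 * set up by the caller.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *	int err;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 */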

extern void bio_endio(struct bio *, int);
extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct block_device *,
                                    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
                                 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *, struct sg_iovec *,
                                     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif

#ifdef CONFIG_HIGHMEM

/*
 * Remember never ever to reenable interrupts between a bvec_kmap_irq
 * and a bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * This might not be a highmem page, but the preempt/irq count
         * bookkeeping is a problem either way.
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}
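
/*
 * Illustrative pairing (interrupts stay disabled between map and
 * unmap; @bvec is a segment the caller owns):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */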

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags) __bio_kunmap_irq(buf, flags)

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the
 * bi_next member of the bio. The bio_list also caches the last list
 * member to allow fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
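
/*
 * Illustrative use (e.g. a remapping driver draining deferred bios;
 * some_bio stands in for whatever the caller queued):
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&deferred, some_bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */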

/*
 * bio_set is used to allow other portions of the IO system to allocate
 * their own private memory pools for bio and iovec structures. These
 * memory pools in turn all allocate from the bio_slab and the
 * bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t *bio_pool;
        mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
        mempool_t *bvec_integrity_pool;
#endif

        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t rescue_lock;
        struct bio_list rescue_list;
        struct work_struct rescue_work;
        struct workqueue_struct *rescue_workqueue;
};

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

/*
 * A small number of entries is fine; splitting is not performance
 * critical, we basically just need to survive.
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))

#define bip_for_each_vec(bvl, bip, iter) \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
        for_each_bio(_bio) \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
        return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
        return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
        return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
        return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
                                      unsigned int sectors)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */