1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#ifndef __LINUX_BIO_H
21#define __LINUX_BIO_H
22
23#include <linux/highmem.h>
24#include <linux/mempool.h>
25#include <linux/ioprio.h>
26#include <linux/bug.h>
27
28#ifdef CONFIG_BLOCK
29
30#include <asm/io.h>
31
32
33#include <linux/blk_types.h>
34
/*
 * BIO_DEBUG enables BIO_BUG_ON() sanity checks throughout the block
 * layer; with it undefined, BIO_BUG_ON expands to nothing.
 */
#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

/* Upper bounds for a single bio: pages, bytes, and 512-byte sectors. */
#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
46
/*
 * bio_op() extracts the request operation encoded in bi_rw.
 * bio_set_op_attrs() ORs an operation and its flags into bi_rw.
 *
 * Fix: each macro argument is individually parenthesized.  The old
 * expansion ((op | flags)) mis-associated when a caller passed a
 * compound expression, e.g. bio_set_op_attrs(bio, op, x ? a : b)
 * expanded to (op | x ? a : b) == ((op | x) ? a : b).
 */
#define bio_op(bio)				(op_from_rq_bits((bio)->bi_rw))
#define bio_set_op_attrs(bio, op, flags)	((bio)->bi_rw |= ((op) | (flags)))
49
50
51
52
/*
 * The I/O priority lives in the top IOPRIO_BITS bits of bi_rw.
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

/*
 * Clear any previous priority bits in bi_rw and install @prio.
 *
 * Fix: @prio is parenthesized in the WARN_ON() expansion.  Because
 * '>=' binds tighter than '|', bio_set_prio(bio, a | b) previously
 * expanded to WARN_ON(a | (b >= (1 << IOPRIO_BITS))), silently
 * checking the wrong expression.
 */
#define bio_set_prio(bio, prio)		do {				\
	WARN_ON((prio) >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
62
63
64
65
66
/* Address of the idx'th bio_vec in the bio's vector array. */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
/* The bio_vec the bio currently points at (bi_idx). */
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
/* Page/offset of the current segment. */
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
/* Segments not yet consumed (bi_idx advances as the bio completes). */
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
/* Remaining size in 512-byte sectors, and the first sector past the bio. */
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
74
75static inline unsigned int bio_cur_bytes(struct bio *bio)
76{
77 if (bio->bi_vcnt)
78 return bio_iovec(bio)->bv_len;
79 else
80 return bio->bi_size;
81}
82
83static inline void *bio_data(struct bio *bio)
84{
85 if (bio->bi_vcnt)
86 return page_address(bio_page(bio)) + bio_offset(bio);
87
88 return NULL;
89}
90
91
92
93
/* Physical address of the current segment's data / of a bio_vec's data. */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * Map the idx'th segment with kmap_atomic and return the address of its
 * data.  Caller must pair with __bio_kunmap_atomic(); kmtype is a
 * historical parameter and is ignored.
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
108
109
110
111
112
/* Last / first still-pending bio_vec of a bio. */
#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Two bio_vecs are physically mergeable when vec2 starts exactly where
 * vec1 ends in physical memory. */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/* Architectures may override the mergeability test (e.g. for IOMMUs). */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

/* True when [addr1, addr2) does not straddle a segment boundary of the
 * given mask (addr2 is exclusive, hence the -1). */
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

/* Complete a bio with -EIO. */
#define bio_io_error(bio) bio_endio((bio), -EIO)
136
137
138
139
140
/*
 * Iterate over the bio's segments starting at start_idx.  bvl walks the
 * bio_vec array in lock-step with the index i.
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

/*
 * Iterate over ALL segments from index 0, ignoring bi_idx — used by
 * code that owns the bio outright (e.g. completion-time page walking).
 * Note bvl is (re)assigned inside the loop condition via the comma
 * operator, so it is valid before the bound check on each pass.
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0;							\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)

/* Iterate over the not-yet-completed segments, starting at bi_idx. */
#define bio_for_each_segment(bvl, bio, i)				\
	for (i = (bio)->bi_idx;						\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
/* Take an extra reference on the bio; dropped with bio_put(). */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
175
#if defined(CONFIG_BLK_DEV_INTEGRITY)

/*
 * Integrity metadata attached to a bio (T10 DIF/DIX style protection
 * information travelling alongside the data).
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */
	unsigned		bip_owns_buf:1;	/* free bip_buf on cleanup */

	struct work_struct	bip_work;	/* deferred verification */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0]; /* embedded bvec array */
};
#endif
201
202
203
204
205
206
207
208
209
/*
 * Result of bio_split2(): the master bio split into two child bios,
 * released once both children complete (refcounted via cnt).
 */
struct bio_pair2 {
	struct bio	*master_bio, *bio1, *bio2;
	atomic_t	cnt;		/* completions outstanding */
	int		error;		/* first error seen, if any */
	struct bio	__bio;
};
extern struct bio_pair2 *bio_split2(struct bio *bi, int first_sectors);
extern void bio_pair2_release(struct bio_pair2 *dbio);
218
219
220
221
222
223
224
225
226
227
228
/*
 * Result of bio_split(): a single-page bio split at first_sectors into
 * two bios with their own embedded bio_vecs (and integrity payloads
 * when CONFIG_BLK_DEV_INTEGRITY is enabled).  Freed when both halves
 * complete (refcounted via cnt).
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;	/* completions outstanding */
	int				error;	/* first error seen */
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern void bio_trim(struct bio *bio, int offset, int size);
242
/* bio_set lifetime and mempool-backed allocation front ends. */
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

/* The default bio_set used by filesystems (bio_alloc()/bio_clone()). */
extern struct bio_set *fs_bio_set;
254
/* Allocate a bio with room for nr_iovecs vecs from the shared fs pool. */
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}
259
/* Clone a bio, allocating the copy from the shared fs pool. */
static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}
264
/* Allocate a bio with plain kmalloc (NULL bio_set — no mempool backing). */
static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
269
/* Clone a bio with plain kmalloc (NULL bio_set — no mempool backing). */
static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);

}
275
/* Completion, segment accounting and advancing. */
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

/* (Re)initialization and chaining of bios. */
extern void bio_init(struct bio *);
extern void bio_init_aux(struct bio *bio, struct bio_aux *bio_aux);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

/* Building bios from pages. */
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);

/* Mapping user/kernel memory into bios (zero-copy where possible). */
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

/* Per-partition I/O accounting hooks. */
void generic_start_io_acct(struct request_queue *q, int rw,
		unsigned long sectors, struct hd_struct *part);
void generic_end_io_acct(struct request_queue *q, int rw,
				struct hd_struct *part,
				unsigned long start_time);
313
/*
 * Architectures must state (via ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
 * whether data-cache flushing of bio pages is required; when it is not,
 * bio_flush_dcache_pages() is a no-op.
 */
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif
324
/* Data copying between bios and page allocation helpers. */
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

/* Bounce-buffered copies to/from userspace. */
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

/* blk-cgroup task association; stubs when CONFIG_BLK_CGROUP is off. */
#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */
346
#ifdef CONFIG_HIGHMEM

/*
 * Map a bio_vec's page so it can be touched from any context.
 * Interrupts are disabled across the mapping (state saved in *flags)
 * because kmap_atomic slots are per-CPU; caller must release with
 * bvec_kunmap_irq() on the same CPU.
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * Disable irqs first so we cannot be migrated or re-entered
	 * between kmap_atomic() and the eventual kunmap.
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	/* kmap_atomic() must hand back a page-aligned mapping */
	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	/* mask off the intra-page offset to recover the mapping base */
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
/* !CONFIG_HIGHMEM: every page is permanently mapped; no irq games. */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
387
/* Map the idx'th segment of a bio; see bvec_kmap_irq() for rules. */
static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

/* Map/unmap the bio's current segment (bi_idx). */
#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
398
399
400
401
402static inline bool bio_has_data(struct bio *bio)
403{
404 if (bio && bio->bi_vcnt)
405 return true;
406
407 return false;
408}
409
410static inline bool bio_is_rw(struct bio *bio)
411{
412 if (!bio_has_data(bio))
413 return false;
414
415 if (bio->bi_rw & REQ_WRITE_SAME)
416 return false;
417
418 return true;
419}
420
421static inline bool bio_mergeable(struct bio *bio)
422{
423 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
424 return false;
425
426 return true;
427}
428
429
430
431
432
433
434
435
/*
 * A singly-linked FIFO of bios, chained through bio->bi_next.
 * Lock-free; callers provide their own serialization.
 */
struct bio_list {
	struct bio *head;	/* oldest bio, NULL when empty */
	struct bio *tail;	/* newest bio, NULL when empty */
};
440
441static inline int bio_list_empty(const struct bio_list *bl)
442{
443 return bl->head == NULL;
444}
445
446static inline void bio_list_init(struct bio_list *bl)
447{
448 bl->head = bl->tail = NULL;
449}
450
/* Static initializer for an empty bio_list. */
#define BIO_EMPTY_LIST	{ NULL, NULL }

/* Walk every bio on the list; not safe against removal during the walk. */
#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)
455
456static inline unsigned bio_list_size(const struct bio_list *bl)
457{
458 unsigned sz = 0;
459 struct bio *bio;
460
461 bio_list_for_each(bio, bl)
462 sz++;
463
464 return sz;
465}
466
467static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
468{
469 bio->bi_next = NULL;
470
471 if (bl->tail)
472 bl->tail->bi_next = bio;
473 else
474 bl->head = bio;
475
476 bl->tail = bio;
477}
478
479static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
480{
481 bio->bi_next = bl->head;
482
483 bl->head = bio;
484
485 if (!bl->tail)
486 bl->tail = bio;
487}
488
489static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
490{
491 if (!bl2->head)
492 return;
493
494 if (bl->tail)
495 bl->tail->bi_next = bl2->head;
496 else
497 bl->head = bl2->head;
498
499 bl->tail = bl2->tail;
500}
501
502static inline void bio_list_merge_head(struct bio_list *bl,
503 struct bio_list *bl2)
504{
505 if (!bl2->head)
506 return;
507
508 if (bl->head)
509 bl2->tail->bi_next = bl->head;
510 else
511 bl->tail = bl2->tail;
512
513 bl->head = bl2->head;
514}
515
/* First bio on the list without removing it; NULL when empty. */
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}
520
521static inline struct bio *bio_list_pop(struct bio_list *bl)
522{
523 struct bio *bio = bl->head;
524
525 if (bio) {
526 bl->head = bl->head->bi_next;
527 if (!bl->head)
528 bl->tail = NULL;
529
530 bio->bi_next = NULL;
531 }
532
533 return bio;
534}
535
536static inline struct bio *bio_list_get(struct bio_list *bl)
537{
538 struct bio *bio = bl->head;
539
540 bl->head = bl->tail = NULL;
541
542 return bio;
543}
544
545
546
547
548
/*
 * Add one to the bio's outstanding-completion count so an extra
 * bio_endio() is required before the bio really completes (chaining).
 * Requires the auxiliary struct; warns and bails if it is absent.
 *
 * NOTE(review): the BIO_AUX_CHAIN flag is set with a plain RMW, not an
 * atomic op — presumably the caller serializes against concurrent flag
 * updates; confirm at the call sites.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	if (WARN_ON_ONCE(!bio->bio_aux))
		return;

	bio->bio_aux->bi_flags |= (1 << BIO_AUX_CHAIN);
	/* flag must be visible before the count is raised */
	smp_mb__before_atomic();
	atomic_inc(&bio->bio_aux->__bi_remaining);
}
558
559
560
561
562
563
564
/* Minimum mempool sizes and the number of biovec slab pools. */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

/*
 * A bio allocation context: slab + mempools guaranteeing forward
 * progress under memory pressure, plus a rescue workqueue used to
 * punt stalled bios so allocation cannot deadlock.
 */
struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;		/* bytes reserved before each bio */

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance: bios that cannot make progress are parked
	 * on rescue_list and resubmitted from rescue_workqueue.
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

/* Describes one of the BIOVEC_NR_POOLS biovec slab sizes. */
struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/* Number of entries in the bio_split mempool. */
#define BIO_SPLIT_ENTRIES 2
601
#if defined(CONFIG_BLK_DEV_INTEGRITY)

/* Address of the idx'th integrity bio_vec / the first one. */
#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

/* Iterate the integrity vecs from start_idx. */
#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

/* Iterate the not-yet-consumed integrity vecs (from bip_idx). */
#define bip_for_each_vec(bvl, bip, i)	\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

/* Iterate integrity vecs across a whole bio chain. */
#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

/* True when the bio carries an integrity payload. */
#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
636
#else /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * No-op stubs so callers need not sprinkle #ifdefs: integrity is
 * reported as absent/disabled and every operation succeeds trivially.
 */
static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free (struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
699
700#endif
701#endif
702