/*
 * Block I/O core: bio allocation, pools, cloning, mapping and completion
 * helpers shared by the block layer and its users.
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Number of bio_vecs embedded in struct bio itself; small I/Os avoid a
 * separate biovec allocation entirely.
 */
#define BIO_INLINE_VECS		4

/*
 * Slabs for the fixed biovec sizes.  If this list changes, bvec_alloc()
 * below must be updated to match.
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * I/O code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Bookkeeping for the per-size bio slab caches, shared between bio_sets
 * that use the same amount of front padding.
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

	if (idx == BIOVEC_MAX_IDX)
		mempool_free(bv, pool);
	else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * Map the requested vector count onto one of the fixed biovec
	 * slab sizes in bvec_slabs[].
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * Only the largest size is backed by a mempool; the smaller sizes
	 * come straight from their slab and fall back to the mempool if
	 * the slab allocation fails and the caller allows direct reclaim.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		if (bio_flagged(bio, BIO_OWNS_VEC))
			bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer back to
		 * the start of the original mempool allocation.
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * After calling bio_reset(), @bio is in the same state as a freshly
 * allocated bio; only the fields preserved across a reset (the flags
 * above BIO_RESET_BITS) are kept.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/*
 * Increment chain count for the bio. Make sure the BIO_CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * @bio's bi_end_io won't be called directly when @bio completes - instead,
 * @parent's bi_end_io is deferred until both @parent and @bio have
 * completed; the chained bio is freed automatically when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
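
/*
 * Illustrative sketch (not part of this file): a typical use of bio_chain()
 * is splitting a bio and submitting the front half while the caller keeps
 * working on the remainder.  "split_sectors" and "q_bio_set" are
 * hypothetical names assumed to be provided by the caller.
 *
 *	struct bio *split = bio_split(bio, split_sectors, GFP_NOIO, q_bio_set);
 *
 *	if (split) {
 *		bio_chain(split, bio);		// parent completes after both
 *		generic_make_request(split);	// submit the front half
 *	}
 *	// ... caller goes on to submit the remaining "bio" ...
 */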

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that
	 * from our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying
	 * to remove from the middle of the list.
	 */
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:	the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from
 *
 * If @bs is NULL, this falls back to a plain kmalloc() of the bio and its
 * inline vecs, and the returned bio is not associated with any bio_set.
 * Otherwise the bio comes from @bs's mempools, which guarantees forward
 * progress as long as bios are completed in finite time.  To avoid
 * deadlocks with stacked drivers that allocate while bios they previously
 * allocated are still queued on current->bio_list, the first attempt is
 * made without __GFP_DIRECT_RECLAIM and, on failure, the queued bios are
 * punted to the bio_set's rescue workqueue before retrying.
 *
 * RETURNS:
 * Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;

		/*
		 * If we're running under generic_make_request() (i.e. bios
		 * are already queued up on current->bio_list), don't block
		 * in the mempool: a bio we previously allocated may need to
		 * complete before the pool has free elements again.  Try
		 * without direct reclaim first and punt the queued bios to
		 * the rescuer before retrying with the original mask.
		 */
		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio_set_flag(bio, BIO_OWNS_VEC);
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
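
/*
 * Illustrative sketch (not part of this file): allocating a one-page bio
 * from fs_bio_set and submitting it.  "bdev", "sector", "page" and
 * "my_end_io" are hypothetical names supplied by the caller.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, fs_bio_set);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */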

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Put a reference to a &struct bio, either one you have gotten with
 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 */
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. The caller owns the returned bio, but not the actual data
 * it points to; the biovec is shared with @bio_src, so the caller must
 * ensure @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);

	/*
	 * Most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here.
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast(), but also allocates the returned bio from @bs
 * and clones the integrity payload if one is attached.
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
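
/*
 * Illustrative sketch (not part of this file): a stacking driver typically
 * clones an incoming bio, retargets it at a lower device and completes the
 * original from its own endio.  "my_bio_set", "lower_bdev" and
 * "my_clone_endio" are hypothetical names.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_bdev = lower_bdev;		// retarget at the lower device
 *	clone->bi_end_io = my_clone_endio;	// complete the original from here
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */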

/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone @bio_src into a newly allocated bio with its own copy of the
 * biovec, so the clone can later be modified independently of @bio_src.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * A cloned bio must never reuse bio_src's bi_vcnt/bi_io_vec
	 * directly: bio_src may have been advanced or split, so rebuild
	 * the biovec from bio_for_each_segment().  Discards carry no
	 * data and write-same bios carry exactly one bvec, so those are
	 * special-cased below.
	 */
	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	if (bio->bi_rw & REQ_DISCARD)
		goto integrity_clone;

	if (bio->bi_rw & REQ_WRITE_SAME) {
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		goto integrity_clone;
	}

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

integrity_clone:
	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_add_pc_page - attempt to add a page to a passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist, honouring the queue's
 * max_hw_sectors, max_segments and segment gap limits.  This can fail for
 * a number of reasons, such as the bio being full or the target block
 * device limitations.  The target block device must allow bio's up to
 * PAGE_SIZE, so it is always possible to add a single page to an empty
 * bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */
	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add a page to a bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @rw: whether to %READ or %WRITE, and whether to fail fast
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). The request is always submitted with
 * %REQ_SYNC.  Returns 0 on success, or the ->bi_error value on failure.
 */
int submit_bio_wait(int rw, struct bio *bio)
{
	struct submit_bio_ret ret;

	rw |= REQ_SYNC;
	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	submit_bio(rw, bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
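
/*
 * Illustrative sketch (not part of this file): synchronously reading one
 * page with submit_bio_wait().  "bdev", "sector" and "page" are
 * hypothetical names supplied by the caller.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 *	return err;
 */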

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset
 * will be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @dst: destination bio list
 * @src: source bio list
 *
 * If @src and @dst are single bios, this just copies the data of @src to
 * @dst.  If they are chains (linked via bi_next), the copy stops when
 * either chain runs out of data.
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
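
/*
 * Illustrative sketch (not part of this file): a driver that bounces a
 * write into its own pages can copy the payload over before submitting.
 * "bounce" is a hypothetical bio whose bvecs were sized to match "bio".
 *
 *	if (bio_alloc_pages(bounce, GFP_NOIO))
 *		goto fail;
 *	bio_copy_data(bounce, bio);		// bounce now holds bio's data
 *	generic_make_request(bounce);
 */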

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

static void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);

/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * Deep-copy the iovec array: the caller's iovecs may live on the
	 * stack and must survive until bio_uncopy_user().
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio->bi_rw |= REQ_WRITE;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				(iter->type & WRITE) != WRITE,
				&pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction
	 */
	if (iter->type & WRITE)
		bio->bi_rw |= REQ_WRITE;

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * The bio may be completed (and would normally disappear) before
	 * the caller is done with it, but we still need it for the unmap,
	 * so grab an extra reference; bio_unmap_user() drops it.
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called
 * from process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio->bi_rw |= REQ_WRITE;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_copy_kern);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * set_page_dirty() cannot be run from interrupt context because the
 * required locks aren't interrupt safe, so direct-IO READs are handled in
 * two phases: the submitter dirties the pages up front with
 * bio_set_pages_dirty(), and at completion bio_check_pages_dirty() checks
 * whether any page was cleaned (written back) while the I/O was in flight.
 * Pages that are still dirty are simply released; if any page came back
 * clean, the whole bio is handed to a workqueue which redirties and
 * releases the pages from process context.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bios whose pages need to be redirtied from process context are chained
 * onto bio_dirty_list (via bi_private) and processed by bio_dirty_fn().
 */
static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Chained bios just propagate completion to their parent.  Handle
	 * that case iteratively instead of recursing through bi_end_io so
	 * that arbitrarily long chains can't overflow the stack.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start
 * of @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/*
	 * Discards need a mutable bio_vec to accommodate the payload
	 * required by the DSM TRIM and UNMAP commands.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);

	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);
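
/*
 * Illustrative sketch (not part of this file): bounding a bio to a
 * hypothetical per-device limit "max_sectors" by repeatedly splitting,
 * chaining each front piece to the original so completion is aggregated.
 *
 *	while (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors,
 *					      GFP_NOIO, fs_bio_set);
 *		if (!split)
 *			break;			// fall back to the whole bio
 *		bio_chain(split, bio);
 *		generic_make_request(split);
 *	}
 *	generic_make_request(bio);
 */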

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/*
	 * 'bio' is a cloned bio which we need to trim to match the given
	 * offset and size.
	 */
	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);
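
/*
 * Illustrative sketch (not part of this file): a driver that wants a
 * private per-I/O context in front of every bio it allocates.  "struct
 * my_io" and "my_bio_set" are hypothetical names; note that the bio must
 * sit at the end of the containing structure.
 *
 *	struct my_io {
 *		void *private_data;
 *		struct bio bio;
 *	};
 *
 *	my_bio_set = bioset_create(BIO_POOL_SIZE,
 *				   offsetof(struct my_io, bio));
 *
 *	// later, for each I/O:
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */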

/**
 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that no mempool is
 *    created for bio_vecs, saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  The block layer
 * will treat @bio as if it were issued by a task which belongs to the
 * blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio before calling this
 * function and is responsible for synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  The
 * block layer will treat @bio as if it were issued by %current no matter
 * which task actually issues it.
 *
 * This function takes an extra reference of %current's io_context and
 * blkcg which will be put when @bio is released.  The caller must own
 * @bio, ensure %current->io_context exists, and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);
2061