#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

#define BIO_INLINE_VECS 4

#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		kfree(bio);
	}
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
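
/*
 * Illustrative sketch (not part of this file): a driver that must carve a
 * request into pieces would typically chain each piece back to the original
 * bio so the parent only completes once every child has finished.  This
 * assumes a "split" bio obtained from bio_split() or a clone:
 *
 *	bio_chain(split, parent);
 *	generic_make_request(split);
 *	// ... then submit the remainder described by @parent as well
 *
 * The parent's remaining count is bumped by bio_chain(), so its ->bi_end_io
 * runs only after both the child and the parent itself have completed.
 */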

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
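
/*
 * Hedged usage sketch (assumed caller-side code, not part of this file):
 * allocate a bio backed by fs_bio_set, fill in the target device and
 * starting sector, then add pages and submit it:
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_pages, fs_bio_set);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	// add pages with bio_add_page(), then submit_bio(bio)
 *
 * "nr_pages", "bdev" and "sector" are placeholders supplied by the caller.
 */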

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
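
/*
 * Illustrative sketch: stacking drivers that only need to redirect an I/O
 * (rather than modify its biovec) typically clone with bio_clone_fast(),
 * point the clone at the lower device and submit it, e.g.:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);
 *	clone->bi_bdev = lower_bdev;
 *	clone->bi_end_io = my_clone_endio;
 *	submit_bio(clone);
 *
 * "my_bio_set", "lower_bdev" and "my_clone_endio" are assumed to be provided
 * by the stacking driver; the clone shares bi_io_vec with the original, so
 * the original must stay alive until the clone completes.
 */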

struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);
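
/*
 * Hedged example (caller-side sketch): building a bio one page at a time.
 * bio_add_page() returns the number of bytes added (0 when the page does not
 * fit), so a short return means the bio is full and must be submitted before
 * continuing:
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *		submit_bio(bio);
 *		// allocate a fresh bio and retry this page
 *	}
 */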

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
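
/*
 * Illustrative synchronous-I/O sketch: read one page and block until it
 * completes.  Error handling is reduced to the minimum; "bdev", "sector"
 * and "page" are assumed to be set up by the caller:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */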

void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
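
/*
 * Usage note (hedged sketch): bio_copy_data() copies data between two bios
 * that may have different biovec layouts, following ->bi_next chains on
 * either side until one runs out.  A bounce-style caller might do:
 *
 *	bio_copy_data(bounce_bio, orig_bio);	// fill the bounce buffer
 *	submit_bio(bounce_bio);
 *
 * where "bounce_bio" is assumed to have been allocated and sized by the
 * caller to cover orig_bio's payload.
 */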

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;

		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  (iter->type & WRITE) != WRITE,
					  &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;

		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	bio_set_flag(bio, BIO_USER_MAPPED);

	bio_get(bio);
	return bio;

 out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);
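
/*
 * Illustrative sketch: mapping a kernel buffer for a passthrough-style
 * request.  The buffer must stay valid until the bio completes, since its
 * pages are referenced in place rather than copied:
 *
 *	bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 * "q", "buf" and "buf_len" are caller-provided placeholders.  Buffers whose
 * lifetime cannot be guaranteed can use bio_copy_kern() below instead, which
 * bounces the data through freshly allocated pages.
 */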

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
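
/*
 * Completion-side sketch (assumed driver code, not part of this file): a
 * ->bi_end_io hook typically inspects bi_error, releases its own resources
 * and, for stacked bios, forwards completion to the original via
 * bio_endio():
 *
 *	static void my_clone_endio(struct bio *clone)
 *	{
 *		struct bio *orig = clone->bi_private;
 *
 *		orig->bi_error = clone->bi_error;
 *		bio_put(clone);
 *		bio_endio(orig);
 *	}
 */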

struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);

	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);
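
/*
 * Illustrative split sketch: carve the first "max_sectors" off a bio, chain
 * the front part to the remainder and submit it, leaving @bio to describe
 * what is left.  "max_sectors" and "bs" are assumed caller values:
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(split);
 *		// @bio now starts max_sectors further in
 *	}
 */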

void bio_trim(struct bio *bio, int offset, int size)
{
	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);
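
/*
 * Hedged example: restrict a clone to a sub-range of the original, e.g. the
 * part of an I/O that falls inside one partition or stripe:
 *
 *	bio_trim(clone, offset_sectors, len_sectors);
 *
 * Both arguments are in 512-byte sectors, with the offset taken relative to
 * the clone's current starting sector; "offset_sectors" and "len_sectors"
 * are placeholders for caller-computed boundaries.
 */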

mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);
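
/*
 * Illustrative setup sketch: a driver that allocates bios in its I/O path
 * usually creates a private bio_set at init time so forward progress does
 * not depend on fs_bio_set:
 *
 *	md->bs = bioset_create(MY_POOL_SIZE, 0);
 *	if (!md->bs)
 *		return -ENOMEM;
 *	...
 *	bioset_free(md->bs);	// on teardown
 *
 * "md" and "MY_POOL_SIZE" are placeholders; a non-zero front_pad can be used
 * to reserve per-bio driver data in front of each struct bio.
 */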

struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);

#ifdef CONFIG_BLK_CGROUP

int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);