#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>
#include "blk.h"

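/*
 * Number of bio_vecs embedded inline in struct bio itself, so that small
 * I/Os can be set up with a single mempool allocation instead of two.
 */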
#define BIO_INLINE_VECS		4

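/*
 * If you change this list, also change bvec_alloc or things will break
 * badly! Cannot be bigger than what you can fit into an unsigned short.
 */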
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

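/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */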
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

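/*
 * Our slab pool management
 */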
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_slabs above!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
	bio_disassociate_task(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

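/**
 * bio_init - initialize a bio
 * @bio:	bio to initialize
 * @table:	optional bio_vec array to attach
 * @max_vecs:	size of @table
 *
 * Zero the bio and set up the fields every freshly initialized bio needs:
 * the remaining count, the reference count, and the biovec table.
 */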
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);

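/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset().
 */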
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

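/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io will be called when @bio completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */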
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * a lower device we could deadlock if the rescue thread blocked trying
	 * to punt them to the same bio_set.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list.
	 */
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

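/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation
 *   is backed by the @bs's mempool, guaranteeing that allocations with
 *   __GFP_DIRECT_RECLAIM set will eventually succeed.
 *
 *   Note that when running under generic_make_request(), bios are not
 *   submitted until after you return, so allocating multiple bios from the
 *   same bio_set could otherwise deadlock on the mempool.  To avoid this,
 *   bios queued on current->bio_list are punted off to a rescuer workqueue
 *   before the allocation is retried with the original gfp_mask.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */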
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;

		/*
		 * If we're running under generic_make_request()
		 * (current->bio_list is non-empty), bios on that list cannot
		 * make progress until we finish allocating, so a mempool
		 * allocation that blocks waiting on them could deadlock.
		 * First try without __GFP_DIRECT_RECLAIM; if that fails, punt
		 * the queued bios to the rescuer workqueue and retry with the
		 * original mask.
		 */
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

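/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_get() or the one that was set up by bio_alloc() and friends.
 *   The last put of a bio will free it.
 */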
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

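/**
 *	__bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: destination bio
 *	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 *
 *	Caller must ensure that @bio_src is not freed before @bio.
 */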
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

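/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Like __bio_clone_fast, only also allocates the returned bio
 */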
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

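/**
 *	bio_clone_bioset - clone a bio
 *	@bio_src: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Clone bio. Caller will own the returned bio, but not the actual data it
 *	points to. Reference count of returned bio will be one.
 */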
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * With immutable biovecs we can't simply memcpy the biovec from
	 * bio_src: the clone must get a biovec the caller can modify, with
	 * bi_idx and bi_bvec_done starting at 0.  So walk bio_src with the
	 * iterator and copy out only the segments actually covered by
	 * bi_iter, one bio_vec at a time.
	 */
	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

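/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by passthrough bios.
 */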
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */
	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

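/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */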
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);

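/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */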
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	size_t offset, diff;
	ssize_t size;

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array.  Because the bio_vecs are larger than the
	 * page pointers by definition this will always work.  But it might
	 * also be interesting if we'd kmalloc the page array...
	 */
	bio->bi_iter.bi_size += size;
	bio->bi_vcnt += nr_pages;

	diff = (nr_pages * PAGE_SIZE - offset) - size;
	while (nr_pages--) {
		bv[nr_pages].bv_page = pages[nr_pages];
		bv[nr_pages].bv_len = PAGE_SIZE;
		bv[nr_pages].bv_offset = 0;
	}

	bv[0].bv_offset += offset;
	bv[0].bv_len -= offset;
	if (diff)
		bv[bio->bi_vcnt - 1].bv_len -= diff;

	iov_iter_advance(iter, size);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = blk_status_to_errno(bio->bi_status);
	complete(&ret->event);
}

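/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike to how submit_bio() is usually used, this function does not
 * result in bio reference to be consumed. The caller must drop the reference
 * on his own.
 */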
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);

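/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */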
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

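/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */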
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

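/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @dst: destination bio list
 * @src: source bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */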
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

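/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */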
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

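/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */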
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

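/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user_iov() and write back data
 *	to user space in case of a read.
 */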
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

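/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q:		destination block queue
 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with
 *	call bio_uncopy_user() on io completion.
 */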
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	bmd->iter = *iter;
	bmd->iter.iov = bmd->iov;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

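/**
 *	bio_map_user_iov - map user iovec into bio
 *	@q:		the struct request_queue for the bio
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */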
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;
	struct bio_vec *bvec;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				(iter->type & WRITE) != WRITE,
				&pages[cur_page]);
		if (unlikely(ret < local_nr_pages)) {
			for (j = cur_page; j < page_limit; j++) {
				if (!pages[j])
					break;
				put_page(pages[j]);
			}
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;
			unsigned short prev_bi_vcnt = bio->bi_vcnt;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			/*
			 * check if vector was merged with previous
			 * drop page reference if needed
			 */
			if (bio->bi_vcnt == prev_bi_vcnt)
				put_page(pages[j]);

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	bio_for_each_segment_all(bvec, bio, j) {
		put_page(bvec->bv_page);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

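/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called
 *	from process context.
 *
 *	bio_unmap_user() may sleep.
 */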
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

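/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */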
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

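/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */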
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

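/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.   If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here, and even read pages
 * which are dirty.
 */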
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

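/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */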
static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(struct request_queue *q, int rw,
			   unsigned long sectors, struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(q, cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(q, part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(struct request_queue *q, int rw,
			 struct hd_struct *part, unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(q, cpu, part);
	part_dec_in_flight(q, part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

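/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(); only the completion of the last remaining fragment
 *   ends I/O on the parent bio.
 */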
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio,
					 blk_status_to_errno(bio->bi_status));
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

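/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */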
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);
1894
1895 return split;
1896}
1897EXPORT_SYMBOL(bio_split);
1898
1899
1900
1901
1902
1903
1904
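/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */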
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);

}
EXPORT_SYMBOL_GPL(bio_trim);

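/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */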
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

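/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *              and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be
 *    allocated for iovecs; this pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue used to avoid dispatch
 *    deadlocks will be created.
 */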
struct bio_set *bioset_create(unsigned int pool_size,
			      unsigned int front_pad,
			      int flags)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (flags & BIOSET_NEED_BVECS) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	if (!(flags & BIOSET_NEED_RESCUER))
		return bs;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}
EXPORT_SYMBOL(bioset_create);

#ifdef CONFIG_BLK_CGROUP

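/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio before calling this
 * function and is responsible for synchronizing calls to this function.
 */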
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

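/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of %current's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio and is
 * responsible for synchronizing calls to this function.
 */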
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

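/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */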
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

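/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */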
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}
EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);