#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Number of bio_vecs embedded in the bio structure itself, so that small
 * I/Os need only a single mempool allocation for the bio and its vectors.
 */
#define BIO_INLINE_VECS		4

/*
 * Slabs backing the biovec mempools. If this list changes, the switch in
 * bvec_alloc() below must be updated to match.
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Per-size bio slab cache bookkeeping, shared by all bio_sets that use the
 * same front_pad.
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * Map the requested number of vectors onto one of the bvec_slabs
	 * entries above.
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * largest (BIO_MAX_PAGES) entry is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer back to
		 * the real start of the allocation before freeing.
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);

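/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset().
 */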
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

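/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */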
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list.
	 */
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

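/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 * backed by the @bs's mempool.
 *
 * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 * always be able to allocate a bio. This is due to the mempool guarantees.
 * To make this work, callers must never allocate more than 1 bio at a time
 * from this pool. Callers that need to allocate more than 1 bio must always
 * submit the previously allocated bio for IO before attempting to allocate
 * a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 * RETURNS:
 * Pointer to new bio on success, NULL on failure.
 */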
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running underneath
		 * generic_make_request() and exhaust the mempool's reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If an allocation would block and there
		 * are bios on current->bio_list, we first retry the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios to the rescuer workqueue before retrying with the
		 * original gfp_mask.
		 */
		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

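/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 */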
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

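/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */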
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

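/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, but also allocates the returned bio.
 */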
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

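/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */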
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * With immutable biovecs we cannot simply memcpy the source biovec:
	 * the clone must get its own biovec that the caller is allowed to
	 * modify, and it only needs enough entries to cover bio_src's current
	 * iterator - anything the source bio has already completed, or that
	 * belongs to a split-off front part, is not copied.
	 */
	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

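/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */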
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */
	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

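/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */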
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);

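/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them into @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */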
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	size_t offset, diff;
	ssize_t size;

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array.  Because the bio_vecs are larger than the
	 * page pointers by definition this will always work.  But it also
	 * means we can't use bio_add_page, so any changes to its semantics
	 * need to be reflected here as well.
	 */
	bio->bi_iter.bi_size += size;
	bio->bi_vcnt += nr_pages;

	diff = (nr_pages * PAGE_SIZE - offset) - size;
	while (nr_pages--) {
		bv[nr_pages].bv_page = pages[nr_pages];
		bv[nr_pages].bv_len = PAGE_SIZE;
		bv[nr_pages].bv_offset = 0;
	}

	bv[0].bv_offset += offset;
	bv[0].bv_len -= offset;
	if (diff)
		bv[bio->bi_vcnt - 1].bv_len -= diff;

	iov_iter_advance(iter, size);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

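/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error
 * from bio_endio() on failure.
 */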
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);

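/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */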
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

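/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @dst: destination bio list
 * @src: source bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */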
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

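/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */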
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

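/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */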
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

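/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */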
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				(iter->type & WRITE) != WRITE,
				&pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

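/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */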
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

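/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */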
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

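/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */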
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

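/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * Compound pages are special-cased here: dirtying them is skipped, because
 * the VM does not uniformly chase down the head page in all cases and their
 * dirtiness is largely meaningless to it anyway.
 */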
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */
static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

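/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 */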
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

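/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */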
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);

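/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */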
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

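/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */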
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);

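/**
 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that mempool is not
 *    created for bio_vecs. Saves some memory for bio_clone_fast() users
 *    that don't need the bvec mempool.
 */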
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);

#ifdef CONFIG_BLK_CGROUP

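/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio before calling this
 * function and is responsible for synchronizing calls to this function.
 */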
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

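/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */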
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);