#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h>

#include <trace/events/block.h>

#define BIO_INLINE_VECS		4

static mempool_t *bio_split_pool __read_mostly;
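/*
 * Biovec slab pools, one per allocation size.  The order and sizes here
 * must match the nr -> idx mapping in bvec_alloc() below.
 */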
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
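/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */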
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

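/*
 * Bookkeeping for bio slab caches: one kmem_cache per distinct bio size
 * (sizeof(struct bio) + front padding), shared between bio_sets and
 * refcounted via slab_ref.  Protected by bio_slab_lock.
 */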
struct bio_slab {
        struct kmem_cache *slab;
        unsigned int slab_ref;
        unsigned int slab_size;
        char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
        unsigned int sz = sizeof(struct bio) + extra_size;
        struct kmem_cache *slab = NULL;
        struct bio_slab *bslab, *new_bio_slabs;
        unsigned int new_bio_slab_max;
        unsigned int i, entry = -1;

        mutex_lock(&bio_slab_lock);

        i = 0;
        while (i < bio_slab_nr) {
                bslab = &bio_slabs[i];

                if (!bslab->slab && entry == -1)
                        entry = i;
                else if (bslab->slab_size == sz) {
                        slab = bslab->slab;
                        bslab->slab_ref++;
                        break;
                }
                i++;
        }

        if (slab)
                goto out_unlock;

        if (bio_slab_nr == bio_slab_max && entry == -1) {
                new_bio_slab_max = bio_slab_max << 1;
                new_bio_slabs = krealloc(bio_slabs,
                                         new_bio_slab_max * sizeof(struct bio_slab),
                                         GFP_KERNEL);
                if (!new_bio_slabs)
                        goto out_unlock;
                bio_slab_max = new_bio_slab_max;
                bio_slabs = new_bio_slabs;
        }
        if (entry == -1)
                entry = bio_slab_nr++;

        bslab = &bio_slabs[entry];

        snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
        slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
        if (!slab)
                goto out_unlock;

        printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
        bslab->slab = slab;
        bslab->slab_ref = 1;
        bslab->slab_size = sz;
out_unlock:
        mutex_unlock(&bio_slab_lock);
        return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
        struct bio_slab *bslab = NULL;
        unsigned int i;

        mutex_lock(&bio_slab_lock);

        for (i = 0; i < bio_slab_nr; i++) {
                if (bs->bio_slab == bio_slabs[i].slab) {
                        bslab = &bio_slabs[i];
                        break;
                }
        }

        if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
                goto out;

        WARN_ON(!bslab->slab_ref);

        if (--bslab->slab_ref)
                goto out;

        kmem_cache_destroy(bslab->slab);
        bslab->slab = NULL;

out:
        mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
        BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

        if (idx == BIOVEC_MAX_IDX)
                mempool_free(bv, pool);
        else {
                struct biovec_slab *bvs = bvec_slabs + idx;

                kmem_cache_free(bvs->slab, bv);
        }
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
                           mempool_t *pool)
{
        struct bio_vec *bvl;

        switch (nr) {
        case 1:
                *idx = 0;
                break;
        case 2 ... 4:
                *idx = 1;
                break;
        case 5 ... 16:
                *idx = 2;
                break;
        case 17 ... 64:
                *idx = 3;
                break;
        case 65 ... 128:
                *idx = 4;
                break;
        case 129 ... BIO_MAX_PAGES:
                *idx = 5;
                break;
        default:
                return NULL;
        }

        if (*idx == BIOVEC_MAX_IDX) {
fallback:
                bvl = mempool_alloc(pool, gfp_mask);
        } else {
                struct biovec_slab *bvs = bvec_slabs + *idx;
                gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

                __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

                bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
                if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
                        *idx = BIOVEC_MAX_IDX;
                        goto fallback;
                }
        }

        return bvl;
}

static void __bio_free(struct bio *bio)
{
        bio_disassociate_task(bio);

        if (bio_integrity(bio))
                bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
        struct bio_set *bs = bio->bi_pool;
        void *p;

        __bio_free(bio);

        if (bs) {
                if (bio_flagged(bio, BIO_OWNS_VEC))
                        bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));

                p = bio;
                p -= bs->front_pad;

                mempool_free(p, bs->bio_pool);
        } else {
                kfree(bio);
        }
}

void bio_init(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
        atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

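/**
 * bio_reset - reinitialise a bio for reuse
 * @bio:	bio to reset
 *
 * Returns @bio to the state it had after bio_init(): everything below
 * BIO_RESET_BYTES is cleared, while the flag bits at or above
 * BIO_RESET_BITS are preserved.  Any integrity payload and cgroup
 * association are released first via __bio_free().
 */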
void bio_reset(struct bio *bio)
{
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

        __bio_free(bio);

        memset(bio, 0, BIO_RESET_BYTES);
        bio->bi_flags = flags | (1 << BIO_UPTODATE);
}
EXPORT_SYMBOL(bio_reset);

static void bio_alloc_rescue(struct work_struct *work)
{
        struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
        struct bio *bio;

        while (1) {
                spin_lock(&bs->rescue_lock);
                bio = bio_list_pop(&bs->rescue_list);
                spin_unlock(&bs->rescue_lock);

                if (!bio)
                        break;

                generic_make_request(bio);
        }
}

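/*
 * punt_bios_to_rescuer - hand off bios queued on current->bio_list
 * @bs:		bio_set whose mempool allocation is blocked
 *
 * When a mempool allocation stalls while we are inside
 * generic_make_request(), bios already queued on current->bio_list cannot
 * be submitted and may be pinning the memory we are waiting for.  Move
 * every bio on that list that was allocated from @bs over to the bio_set's
 * rescue list and kick the rescuer workqueue, which submits them from its
 * own context.
 */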
static void punt_bios_to_rescuer(struct bio_set *bs)
{
        struct bio_list punt, nopunt;
        struct bio *bio;

        bio_list_init(&punt);
        bio_list_init(&nopunt);

        while ((bio = bio_list_pop(current->bio_list)))
                bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

        *current->bio_list = nopunt;

        spin_lock(&bs->rescue_lock);
        bio_list_merge(&bs->rescue_list, &punt);
        spin_unlock(&bs->rescue_lock);

        queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

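/**
 * bio_alloc_bioset - allocate a bio from a specific bio_set
 * @gfp_mask:	allocation flags
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from, or NULL to use kmalloc()
 *
 * If @bs is NULL the bio and its bio_vecs come straight from kmalloc()
 * and @nr_iovecs may not exceed UIO_MAXIOV.  Otherwise the bio comes
 * from @bs->bio_pool (offset by @bs->front_pad) and, when more than
 * BIO_INLINE_VECS vectors are needed, the vector array comes from
 * bvec_alloc().
 *
 * To avoid deadlocking while already inside generic_make_request()
 * (i.e. current->bio_list is non-empty), the first mempool attempt is made
 * without __GFP_WAIT; if it fails, bios queued by the current task are
 * punted to the bio_set's rescuer workqueue and the allocation is retried
 * with the caller's original @gfp_mask.
 *
 * Returns the new bio, or NULL on failure.
 */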
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
        gfp_t saved_gfp = gfp_mask;
        unsigned front_pad;
        unsigned inline_vecs;
        unsigned long idx = BIO_POOL_NONE;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;

        if (!bs) {
                if (nr_iovecs > UIO_MAXIOV)
                        return NULL;

                p = kmalloc(sizeof(struct bio) +
                            nr_iovecs * sizeof(struct bio_vec),
                            gfp_mask);
                front_pad = 0;
                inline_vecs = nr_iovecs;
        } else {
                if (current->bio_list && !bio_list_empty(current->bio_list))
                        gfp_mask &= ~__GFP_WAIT;

                p = mempool_alloc(bs->bio_pool, gfp_mask);
                if (!p && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        p = mempool_alloc(bs->bio_pool, gfp_mask);
                }

                front_pad = bs->front_pad;
                inline_vecs = BIO_INLINE_VECS;
        }

        if (unlikely(!p))
                return NULL;

        bio = p + front_pad;
        bio_init(bio);

        if (nr_iovecs > inline_vecs) {
                bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                if (!bvl && gfp_mask != saved_gfp) {
                        punt_bios_to_rescuer(bs);
                        gfp_mask = saved_gfp;
                        bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
                }

                if (unlikely(!bvl))
                        goto err_free;

                bio->bi_flags |= 1 << BIO_OWNS_VEC;
        } else if (nr_iovecs) {
                bvl = bio->bi_inline_vecs;
        }

        bio->bi_pool = bs;
        bio->bi_flags |= idx << BIO_POOL_OFFSET;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
        return bio;

err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i) {
                char *data = bvec_kmap_irq(bv, &flags);
                memset(data, 0, bv->bv_len);
                flush_dcache_page(bv->bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);

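/**
 * bio_put - release a reference to a bio
 * @bio:	bio to release reference to
 *
 * Description:
 *   Drop a reference on @bio.  When the last reference is dropped the bio
 *   is freed (back to its bio_set, or via kfree() for a kmalloc'ed bio).
 **/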
void bio_put(struct bio *bio)
{
        BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

        if (atomic_dec_and_test(&bio->bi_cnt))
                bio_free(bio);
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

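/**
 * __bio_clone - copy the contents of one bio into another
 * @bio:	destination bio (must have room for @bio_src's io_vec)
 * @bio_src:	bio to clone
 *
 * Copies the bio_vec array and the request-describing fields (sector,
 * bdev, rw, size, index) from @bio_src into @bio and marks @bio as
 * BIO_CLONED.  The pages themselves are shared, not copied.
 */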
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
        memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
               bio_src->bi_max_vecs * sizeof(struct bio_vec));

        bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_vcnt = bio_src->bi_vcnt;
        bio->bi_size = bio_src->bi_size;
        bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);

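/**
 * bio_clone_bioset - clone a bio, allocating the copy from a bio_set
 * @bio:	bio to clone
 * @gfp_mask:	allocation priority
 * @bs:		bio_set to allocate the new bio from
 *
 * Allocates a new bio with the same number of vecs as @bio, copies the
 * request description with __bio_clone(), and clones any integrity
 * payload.  Returns NULL on allocation failure.
 */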
struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
                             struct bio_set *bs)
{
        struct bio *b;

        b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
        if (!b)
                return NULL;

        __bio_clone(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, gfp_mask);

                if (ret < 0) {
                        bio_put(b);
                        return NULL;
                }
        }

        return b;
}
EXPORT_SYMBOL(bio_clone_bioset);

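/**
 * bio_get_nr_vecs - return an upper bound on the usable bio_vec count
 * @bdev:	block device to query
 *
 * Returns how many pages it is generally safe to add to a bio destined
 * for @bdev, based on the queue's segment and sector limits and capped
 * at BIO_MAX_PAGES.
 */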
int bio_get_nr_vecs(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;

        nr_pages = min_t(unsigned,
                     queue_max_segments(q),
                     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);

        return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                          *page, unsigned int len, unsigned int offset,
                          unsigned short max_sectors)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;

        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        unsigned int prev_bv_len = prev->bv_len;
                        prev->bv_len += len;

                        if (q->merge_bvec_fn) {
                                struct bvec_merge_data bvm = {
                                        .bi_bdev = bio->bi_bdev,
                                        .bi_sector = bio->bi_sector,
                                        .bi_size = bio->bi_size - prev_bv_len,
                                        .bi_rw = bio->bi_rw,
                                };

                                if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
                                        prev->bv_len -= len;
                                        return 0;
                                }
                        }

                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        while (bio->bi_phys_segments >= queue_max_segments(q)) {

                if (retried_segments)
                        return 0;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;

        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
                        .bi_sector = bio->bi_sector,
                        .bi_size = bio->bi_size,
                        .bi_rw = bio->bi_rw,
                };

                if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
                        bvec->bv_page = NULL;
                        bvec->bv_len = 0;
                        bvec->bv_offset = 0;
                        return 0;
                }
        }

        if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio->bi_flags &= ~(1 << BIO_SEG_VALID);

        bio->bi_vcnt++;
        bio->bi_phys_segments++;
 done:
        bio->bi_size += len;
        return len;
}

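/**
 * bio_add_pc_page - attempt to add a page to a passthrough bio
 * @q:		the target request queue
 * @bio:	destination bio
 * @page:	page to add
 * @len:	length of the data to add
 * @offset:	offset into the page
 *
 * Like bio_add_page(), but checks against the queue's hardware sector
 * limit (queue_max_hw_sectors()) instead of the normal I/O limit.
 * Returns the number of bytes added, or 0 if the page could not be added.
 */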
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
{
        return __bio_add_page(q, bio, page, len, offset,
                              queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);

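/**
 * bio_add_page - attempt to add a page to a bio
 * @bio:	destination bio
 * @page:	page to add
 * @len:	length of the data to add
 * @offset:	offset into the page
 *
 * Attempt to add a page to the bio_vec list of @bio, subject to the
 * limits of the bio's target queue.  Returns the number of bytes added,
 * or 0 if the page could not be added.
 */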
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
EXPORT_SYMBOL(bio_add_page);

struct submit_bio_ret {
        struct completion event;
        int error;
};

static void submit_bio_wait_endio(struct bio *bio, int error)
{
        struct submit_bio_ret *ret = bio->bi_private;

        ret->error = error;
        complete(&ret->event);
}

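/**
 * submit_bio_wait - submit a bio and wait until it completes
 * @rw:		whether to %READ or %WRITE, plus any other REQ_ flags
 * @bio:	bio to submit
 *
 * Simple wrapper around submit_bio() that sleeps until the bio completes.
 * REQ_SYNC is set internally, and the bio's bi_private and bi_end_io are
 * used by this helper, so the caller must not rely on them.  Returns the
 * completion error (0 on success).
 */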
int submit_bio_wait(int rw, struct bio *bio)
{
        struct submit_bio_ret ret;

        rw |= REQ_SYNC;
        init_completion(&ret.event);
        bio->bi_private = &ret;
        bio->bi_end_io = submit_bio_wait_endio;
        submit_bio(rw, bio);
        wait_for_completion(&ret.event);

        return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
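
/*
 * Example usage (illustrative sketch only, not part of this file):
 * synchronously read one page from the start of a block device.  It
 * assumes the caller already has a valid "bdev" and an allocated "page";
 * bio_alloc() is the <linux/bio.h> wrapper around bio_alloc_bioset()
 * using fs_bio_set.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = 0;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(READ, bio);
 *	bio_put(bio);
 */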
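
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * Updates bi_sector, bi_size and the internal iterator (bi_idx and the
 * current bio_vec's offset/length) to reflect @bytes of completed I/O,
 * advancing any integrity payload as well.  Bios whose type carries no
 * data (those matching BIO_NO_ADVANCE_ITER_MASK) only have the sector
 * and size updated.
 */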
void bio_advance(struct bio *bio, unsigned bytes)
{
        if (bio_integrity(bio))
                bio_integrity_advance(bio, bytes);

        bio->bi_sector += bytes >> 9;
        bio->bi_size -= bytes;

        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
                return;

        while (bytes) {
                if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                        WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
                                  bio->bi_idx, bio->bi_vcnt);
                        break;
                }

                if (bytes >= bio_iovec(bio)->bv_len) {
                        bytes -= bio_iovec(bio)->bv_len;
                        bio->bi_idx++;
                } else {
                        bio_iovec(bio)->bv_len -= bytes;
                        bio_iovec(bio)->bv_offset += bytes;
                        bytes = 0;
                }
        }
}
EXPORT_SYMBOL(bio_advance);

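/**
 * bio_alloc_pages - allocate a page for every bvec in a bio
 * @bio:	bio whose bio_vecs should get pages
 * @gfp_mask:	flags for the page allocations
 *
 * Allocates one page per bio_vec.  On failure all pages allocated so far
 * are freed again and -ENOMEM is returned; on success returns 0.
 */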
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
        int i;
        struct bio_vec *bv;

        bio_for_each_segment_all(bv, bio, i) {
                bv->bv_page = alloc_page(gfp_mask);
                if (!bv->bv_page) {
                        while (--bv >= bio->bi_io_vec)
                                __free_page(bv->bv_page);
                        return -ENOMEM;
                }
        }

        return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

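/**
 * bio_copy_data - copy the contents of one bio (chain) into another
 * @dst:	destination bio (may be a chain via bi_next)
 * @src:	source bio (may be a chain via bi_next)
 *
 * Copies data from @src to @dst, walking both bio_vec lists (and following
 * bi_next chains) until either side runs out.  The two sides may have
 * different numbers and sizes of segments.
 */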
void bio_copy_data(struct bio *dst, struct bio *src)
{
        struct bio_vec *src_bv, *dst_bv;
        unsigned src_offset, dst_offset, bytes;
        void *src_p, *dst_p;

        src_bv = bio_iovec(src);
        dst_bv = bio_iovec(dst);

        src_offset = src_bv->bv_offset;
        dst_offset = dst_bv->bv_offset;

        while (1) {
                if (src_offset == src_bv->bv_offset + src_bv->bv_len) {
                        src_bv++;
                        if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) {
                                src = src->bi_next;
                                if (!src)
                                        break;

                                src_bv = bio_iovec(src);
                        }

                        src_offset = src_bv->bv_offset;
                }

                if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) {
                        dst_bv++;
                        if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) {
                                dst = dst->bi_next;
                                if (!dst)
                                        break;

                                dst_bv = bio_iovec(dst);
                        }

                        dst_offset = dst_bv->bv_offset;
                }

                bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset,
                            src_bv->bv_offset + src_bv->bv_len - src_offset);

                src_p = kmap_atomic(src_bv->bv_page);
                dst_p = kmap_atomic(dst_bv->bv_page);

                /*
                 * Copy from the running offsets, not bv_offset: using
                 * bv_offset here would restart at the beginning of a
                 * partially consumed bvec and corrupt the copy.
                 */
                memcpy(dst_p + dst_offset,
                       src_p + src_offset,
                       bytes);

                kunmap_atomic(dst_p);
                kunmap_atomic(src_p);

                src_offset += bytes;
                dst_offset += bytes;
        }
}
EXPORT_SYMBOL(bio_copy_data);

struct bio_map_data {
        struct bio_vec *iovecs;
        struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
{
        memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
        kfree(bmd->iovecs);
        kfree(bmd->sgvecs);
        kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs,
                                               unsigned int iov_count,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd;

        if (iov_count > UIO_MAXIOV)
                return NULL;

        bmd = kmalloc(sizeof(*bmd), gfp_mask);
        if (!bmd)
                return NULL;

        bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
        if (!bmd->iovecs) {
                kfree(bmd);
                return NULL;
        }

        bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
        if (bmd->sgvecs)
                return bmd;

        kfree(bmd->iovecs);
        kfree(bmd);
        return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
                          struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
{
        int ret = 0, i;
        struct bio_vec *bvec;
        int iov_idx = 0;
        unsigned int iov_off = 0;

        bio_for_each_segment_all(bvec, bio, i) {
                char *bv_addr = page_address(bvec->bv_page);
                unsigned int bv_len = iovecs[i].bv_len;

                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
                        char __user *iov_addr;

                        bytes = min_t(unsigned int,
                                      iov[iov_idx].iov_len - iov_off, bv_len);
                        iov_addr = iov[iov_idx].iov_base + iov_off;

                        if (!ret) {
                                if (to_user)
                                        ret = copy_to_user(iov_addr, bv_addr,
                                                           bytes);

                                if (from_user)
                                        ret = copy_from_user(bv_addr, iov_addr,
                                                             bytes);

                                if (ret)
                                        ret = -EFAULT;
                        }

                        bv_len -= bytes;
                        bv_addr += bytes;
                        iov_addr += bytes;
                        iov_off += bytes;

                        if (iov[iov_idx].iov_len == iov_off) {
                                iov_idx++;
                                iov_off = 0;
                        }
                }

                if (do_free_page)
                        __free_page(bvec->bv_page);
        }

        return ret;
}

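/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio:	bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and, for a READ issued
 * from a task with a user address space, copy the data back to user
 * space.  Always releases the bio and its bio_map_data, returning the
 * result of the copy-back (if any).
 */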
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        struct bio_vec *bvec;
        int ret = 0, i;

        if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
                if (current->mm)
                        ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
                                             bmd->nr_sgvecs, bio_data_dir(bio) == READ,
                                             0, bmd->is_our_pages);
                else if (bmd->is_our_pages)
                        bio_for_each_segment_all(bvec, bio, i)
                                __free_page(bvec->bv_page);
        }
        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);

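/**
 * bio_copy_user_iov - copy user data to/from a bio via bounce pages
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	the iovec
 * @iov_count:	number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io: pages are allocated
 * (or taken from @map_data) and, for a write, filled from the user iovec.
 * bio_uncopy_user() must be called on completion to copy back (for a
 * read) and free the pages.  Returns an ERR_PTR on failure.
 */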
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              struct sg_iovec *iov, int iov_count,
                              int write_to_vm, gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct bio_vec *bvec;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = 0;
        unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long)iov[i].iov_base;
                end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;
                len += iov[i].iov_len;
        }

        if (offset)
                nr_pages++;

        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        if (!write_to_vm)
                bio->bi_rw |= REQ_WRITE;

        ret = 0;

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                break;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                break;
                        }
                }

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
                        break;

                len -= bytes;
                offset = 0;
        }

        if (ret)
                goto cleanup;

        if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
                if (ret)
                        goto cleanup;
        }

        bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
        return bio;
cleanup:
        if (!map_data)
                bio_for_each_segment_all(bvec, bio, i)
                        __free_page(bvec->bv_page);

        bio_put(bio);
out_bmd:
        bio_free_map_data(bmd);
        return ERR_PTR(ret);
}

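/**
 * bio_copy_user - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @uaddr:	start of user address
 * @len:	length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask:	memory allocation flags
 *
 * Single-iovec wrapper around bio_copy_user_iov().
 */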
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
                          unsigned long uaddr, unsigned int len,
                          int write_to_vm, gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);

static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
                                      struct sg_iovec *iov, int iov_count,
                                      int write_to_vm, gfp_t gfp_mask)
{
        int i, j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                if (end < start)
                        return ERR_PTR(-EINVAL);

                nr_pages += end - start;

                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
        if (!pages)
                goto out;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                          write_to_vm, &pages[cur_page]);
                if (ret < local_nr_pages) {
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                            bytes)
                                break;

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;

                while (j < page_limit)
                        page_cache_release(pages[j++]);
        }

        kfree(pages);

        if (!write_to_vm)
                bio->bi_rw |= REQ_WRITE;

        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
        return bio;

 out_unmap:
        for (i = 0; i < nr_pages; i++) {
                if (!pages[i])
                        break;
                page_cache_release(pages[i]);
        }
 out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}

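/**
 * bio_map_user - map user address into bio
 * @q:		the struct request_queue for the bio
 * @bdev:	destination block device
 * @uaddr:	start of user address
 * @len:	length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask:	memory allocation flags
 *
 * Single-iovec wrapper around bio_map_user_iov(); maps the user pages
 * directly into a bio without copying.
 */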
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
                         unsigned long uaddr, unsigned int len, int write_to_vm,
                         gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);

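/**
 * bio_map_user_iov - map user sg_iovec into a bio
 * @q:		the struct request_queue for the bio
 * @bdev:	destination block device
 * @iov:	the iovec
 * @iov_count:	number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask:	memory allocation flags
 *
 * Map the user pages described by @iov directly into a bio suitable for
 * io to a block device.  Returns an error pointer in case of error.  The
 * returned bio carries an extra reference (taken with bio_get()) so that
 * it survives until bio_unmap_user() is called.
 */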
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                             struct sg_iovec *iov, int iov_count,
                             int write_to_vm, gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
                                 gfp_mask);
        if (IS_ERR(bio))
                return bio;

        bio_get(bio);

        return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                page_cache_release(bvec->bv_page);
        }

        bio_put(bio);
}

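/**
 * bio_unmap_user - unmap a bio mapped with bio_map_user() or bio_map_user_iov()
 * @bio:	the bio being unmapped
 *
 * Pages written to by the device (a READ) are marked dirty and then
 * released.  The extra reference taken by bio_map_user_iov() is dropped
 * here as well, so the bio itself goes away.  Must be called from process
 * context.
 */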
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);

static void bio_map_kern_endio(struct bio *bio, int err)
{
        bio_put(bio);
}

static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                  unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes)
                        break;

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}

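/**
 * bio_map_kern - map kernel address into bio
 * @q:		the struct request_queue for the bio
 * @data:	pointer to buffer to map
 * @len:	length in bytes
 * @gfp_mask:	allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device.  Returns an error pointer in case of error.
 */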
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_kern(q, data, len, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (bio->bi_size == len)
                return bio;

        /*
         * Don't support partial mappings.
         */
        bio_put(bio);
        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        const int read = bio_data_dir(bio) == READ;
        struct bio_map_data *bmd = bio->bi_private;
        int i;
        char *p = bmd->sgvecs[0].iov_base;

        bio_for_each_segment_all(bvec, bio, i) {
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;

                if (read)
                        memcpy(p, addr, len);

                __free_page(bvec->bv_page);
                p += len;
        }

        bio_free_map_data(bmd);
        bio_put(bio);
}

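/**
 * bio_copy_kern - copy kernel address into bio
 * @q:		the struct request_queue for the bio
 * @data:	pointer to buffer to copy
 * @len:	length in bytes
 * @gfp_mask:	allocation flags for bio and page allocation
 * @reading:	data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device via bounce pages.  Returns an error pointer in case of error.
 */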
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                          gfp_t gfp_mask, int reading)
{
        struct bio *bio;
        struct bio_vec *bvec;
        int i;

        bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (!reading) {
                void *p = data;

                bio_for_each_segment_all(bvec, bio, i) {
                        char *addr = page_address(bvec->bv_page);

                        memcpy(addr, p, bvec->bv_len);
                        p += bvec->bv_len;
                }
        }

        bio->bi_end_io = bio_copy_kern_endio;

        return bio;
}
EXPORT_SYMBOL(bio_copy_kern);

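/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() support direct-I/O
 * style reads into user pages: the pages are dirtied before submission,
 * and on completion any page that was cleaned in the meantime (e.g. by
 * the VM writing it out) must be redirtied.  Because bios can complete
 * in interrupt context, where set_page_dirty_lock() must not be called,
 * bio_check_pages_dirty() defers that work to the bio_dirty_work
 * workqueue item below.
 */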
void bio_set_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (page && !PageCompound(page))
                        set_page_dirty_lock(page);
        }
}

static void bio_release_pages(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (page)
                        put_page(page);
        }
}

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

static void bio_dirty_fn(struct work_struct *work)
{
        unsigned long flags;
        struct bio *bio;

        spin_lock_irqsave(&bio_dirty_lock, flags);
        bio = bio_dirty_list;
        bio_dirty_list = NULL;
        spin_unlock_irqrestore(&bio_dirty_lock, flags);

        while (bio) {
                struct bio *next = bio->bi_private;

                bio_set_pages_dirty(bio);
                bio_release_pages(bio);
                bio_put(bio);
                bio = next;
        }
}

void bio_check_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec;
        int nr_clean_pages = 0;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (PageDirty(page) || PageCompound(page)) {
                        page_cache_release(page);
                        bvec->bv_page = NULL;
                } else {
                        nr_clean_pages++;
                }
        }

        if (nr_clean_pages) {
                unsigned long flags;

                spin_lock_irqsave(&bio_dirty_lock, flags);
                bio->bi_private = bio_dirty_list;
                bio_dirty_list = bio;
                spin_unlock_irqrestore(&bio_dirty_lock, flags);
                schedule_work(&bio_dirty_work);
        } else {
                bio_put(bio);
        }
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
        int i;
        struct bio_vec *bvec;

        bio_for_each_segment(bvec, bi, i)
                flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

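/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio.  It clears BIO_UPTODATE
 *   on error and then calls the bio's bi_end_io callback.  @error is 0
 *   on success, or one of the usual -Exxx values (-EIO, for instance)
 *   when something went wrong.
 **/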
void bio_endio(struct bio *bio, int error)
{
        if (error)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
}
EXPORT_SYMBOL(bio_endio);

void bio_pair_release(struct bio_pair *bp)
{
        if (atomic_dec_and_test(&bp->cnt)) {
                struct bio *master = bp->bio1.bi_private;

                bio_endio(master, bp->error);
                mempool_free(bp, bp->bio2.bi_private);
        }
}
EXPORT_SYMBOL(bio_pair_release);

static void bio_pair_end_1(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}

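/*
 * bio_split - split a bio in two at @first_sectors
 *
 * Only bios with at most a single segment are supported (see the BUG_ON
 * below).  The original bio is completed once both halves and the
 * caller's reference on the bio_pair have been released.
 */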
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
        struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

        if (!bp)
                return bp;

        trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
                          bi->bi_sector + first_sectors);

        BUG_ON(bio_segments(bi) > 1);
        atomic_set(&bp->cnt, 3);
        bp->error = 0;
        bp->bio1 = *bi;
        bp->bio2 = *bi;
        bp->bio2.bi_sector += first_sectors;
        bp->bio2.bi_size -= first_sectors << 9;
        bp->bio1.bi_size = first_sectors << 9;

        if (bi->bi_vcnt != 0) {
                bp->bv1 = *bio_iovec(bi);
                bp->bv2 = *bio_iovec(bi);

                if (bio_is_rw(bi)) {
                        bp->bv2.bv_offset += first_sectors << 9;
                        bp->bv2.bv_len -= first_sectors << 9;
                        bp->bv1.bv_len = first_sectors << 9;
                }

                bp->bio1.bi_io_vec = &bp->bv1;
                bp->bio2.bi_io_vec = &bp->bv2;

                bp->bio1.bi_max_vecs = 1;
                bp->bio2.bi_max_vecs = 1;
        }

        bp->bio1.bi_end_io = bio_pair_end_1;
        bp->bio2.bi_end_io = bio_pair_end_2;

        bp->bio1.bi_private = bi;
        bp->bio2.bi_private = bio_split_pool;

        if (bio_integrity(bi))
                bio_integrity_split(bi, bp, first_sectors);

        return bp;
}
EXPORT_SYMBOL(bio_split);

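/**
 * bio_sector_offset - find hardware sector offset in a bio
 * @bio:	bio to inspect
 * @index:	index of the bio_vec of interest
 * @offset:	byte offset within that bio_vec
 *
 * Returns the number of logical-block-size sectors between the start of
 * @bio and the position given by (@index, @offset).
 */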
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
                           unsigned int offset)
{
        unsigned int sector_sz;
        struct bio_vec *bv;
        sector_t sectors;
        int i;

        sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
        sectors = 0;

        if (index >= bio->bi_idx)
                index = bio->bi_vcnt - 1;

        bio_for_each_segment_all(bv, bio, i) {
                if (i == index) {
                        if (offset > bv->bv_offset)
                                sectors += (offset - bv->bv_offset) / sector_sz;
                        break;
                }

                sectors += bv->bv_len / sector_sz;
        }

        return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);

mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
{
        struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

        return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
        if (bs->rescue_workqueue)
                destroy_workqueue(bs->rescue_workqueue);

        if (bs->bio_pool)
                mempool_destroy(bs->bio_pool);

        if (bs->bvec_pool)
                mempool_destroy(bs->bvec_pool);

        bioset_integrity_free(bs);
        bio_put_slab(bs);

        kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

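/**
 * bioset_create - create a bio_set
 * @pool_size:	number of bios and bio_vecs to cache in the mempools
 * @front_pad:	number of bytes to allocate in front of each returned bio
 *
 * Description:
 *    Set up a bio_set to be used with bio_alloc_bioset().  The front pad
 *    is useful for embedding the bio inside a larger structure without a
 *    separate allocation; because bio_free() subtracts @front_pad from the
 *    bio pointer, the bio must sit at the end of that structure.
 */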
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
        struct bio_set *bs;

        bs = kzalloc(sizeof(*bs), GFP_KERNEL);
        if (!bs)
                return NULL;

        bs->front_pad = front_pad;

        spin_lock_init(&bs->rescue_lock);
        bio_list_init(&bs->rescue_list);
        INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

        bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
        if (!bs->bio_slab) {
                kfree(bs);
                return NULL;
        }

        bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
        if (!bs->bio_pool)
                goto bad;

        bs->bvec_pool = biovec_create_pool(bs, pool_size);
        if (!bs->bvec_pool)
                goto bad;

        bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
        if (!bs->rescue_workqueue)
                goto bad;

        return bs;
bad:
        bioset_free(bs);
        return NULL;
}
EXPORT_SYMBOL(bioset_create);

#ifdef CONFIG_BLK_CGROUP

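/**
 * bio_associate_current - associate a bio with %current
 * @bio:	target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  The
 * block layer will then treat @bio as if it were issued by %current no
 * matter which task actually issues it.
 *
 * This takes an extra reference on %current's io_context and blkio css,
 * both of which are put again when @bio is released.  Returns -EBUSY if
 * @bio is already associated and -ENOENT if %current has no io_context.
 */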
int bio_associate_current(struct bio *bio)
{
        struct io_context *ioc;
        struct cgroup_subsys_state *css;

        if (bio->bi_ioc)
                return -EBUSY;

        ioc = current->io_context;
        if (!ioc)
                return -ENOENT;

        /* acquire an active reference on the io_context */
        get_io_context_active(ioc);
        bio->bi_ioc = ioc;

        /* associate the blkio cgroup, if one exists */
        rcu_read_lock();
        css = task_subsys_state(current, blkio_subsys_id);
        if (css && css_tryget(css))
                bio->bi_css = css;
        rcu_read_unlock();

        return 0;
}

/*
 * bio_disassociate_task - undo bio_associate_current()
 */
void bio_disassociate_task(struct bio *bio)
{
        if (bio->bi_ioc) {
                put_io_context(bio->bi_ioc);
                bio->bi_ioc = NULL;
        }
        if (bio->bi_css) {
                css_put(bio->bi_css);
                bio->bi_css = NULL;
        }
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                int size;
                struct biovec_slab *bvs = bvec_slabs + i;

                if (bvs->nr_vecs <= BIO_INLINE_VECS) {
                        bvs->slab = NULL;
                        continue;
                }

                size = bvs->nr_vecs * sizeof(struct bio_vec);
                bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        }
}

static int __init init_bio(void)
{
        bio_slab_max = 2;
        bio_slab_nr = 0;
        bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
        if (!bio_slabs)
                panic("bio: can't allocate bios\n");

        bio_integrity_init();
        biovec_init_slabs();

        fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");

        if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
                panic("bio: can't create integrity pool\n");

        bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                                                     sizeof(struct bio_pair));
        if (!bio_split_pool)
                panic("bio: can't create split pool\n");

        return 0;
}
subsys_initcall(init_bio);