/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
/*
 * Number of bio_vecs stored inline in the bio itself; this shrinks a small
 * bio's data allocation from two mempool calls to one.
 */
#define BIO_INLINE_VECS		4

/*
 * If you change this list, also change bvec_alloc or things will break
 * badly! It cannot be bigger than what fits into an unsigned short.
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management.
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * Map the request size onto the nr_vecs ladder in bvec_slabs above.
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * largest entry is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and
		 * __GFP_DIRECT_RECLAIM is set, retry with the mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
	bio_disassociate_blkg(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before
		 * freeing it back to the mempool.
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
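
/*
 * Illustrative sketch (not part of this file): a caller that owns the bio
 * storage, e.g. on the stack, pairs bio_init() with bio_uninit(). The
 * bdev, sector and page names below are caller-supplied placeholders.
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */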

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly-
 *   allocated bio returned from bio_alloc_bioset() - the only fields that
 *   are preserved are the ones that are initialized by bio_alloc_bioset().
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
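
/*
 * Illustrative sketch (not part of this file): issuing part of the work in
 * a child bio while the parent's completion waits for both. "front" and
 * nr_vecs are placeholders.
 *
 *	struct bio *front = bio_alloc(GFP_NOIO, nr_vecs);
 *
 *	...				// fill @front with part of the work
 *	bio_chain(front, bio);		// @bio now completes after @front
 *	submit_bio(front);
 *	...				// continue with @bio, submit it too
 */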

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that
	 * from our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying
	 * to remove from the middle of the list:
	 */
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation
 *   is backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc
 *   will always be able to allocate a bio. This is due to the mempool
 *   guarantees. To make this work, callers must never allocate more than 1
 *   bio at a time from this pool. Callers that need to allocate more than 1
 *   bio must always submit the previously allocated bio for IO before
 *   attempting to allocate a new one. Failure to do so can cause deadlocks
 *   under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code
 *   in generic_make_request() that converts recursion into iteration, to
 *   prevent stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from
 *   other mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's
 *   front_pad for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration;
		 * this means if we're running beneath it, any bios we
		 * allocate and submit will not be submitted (and thus freed)
		 * until after we return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was
		 * splitting bios), we would deadlock if we exhausted the
		 * mempool's reserve.
		 *
		 * We solve this, and guarantee forward progress, with a
		 * rescuer workqueue per bio_set. If we go to allocate and
		 * there are bios on current->bio_list, we first try the
		 * allocation without __GFP_DIRECT_RECLAIM; if that fails, we
		 * punt those bios we would be blocking to the rescuer
		 * workqueue before we retry with the original gfp_flags.
		 */
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
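
/*
 * Illustrative sketch (not part of this file): with __GFP_DIRECT_RECLAIM
 * set (as in GFP_NOIO), an allocation from a mempool-backed bio_set will
 * eventually succeed, so the NULL check below matters only for non-reclaim
 * masks. "bs" stands for a previously initialized bio_set.
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, bs);
 *
 *	if (!bio)
 *		return -ENOMEM;	// only reachable without direct reclaim
 */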

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}

/**
 *	__bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: destination bio
 *	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 *
 *	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio_full(bio))
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */
	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
			  unsigned int len, unsigned int off)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page to a bio in a new segment
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must
 * ensure that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		    unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	if (!__bio_try_merge_page(bio, page, len, offset)) {
		if (bio_full(bio))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
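
/*
 * Illustrative sketch (not part of this file): filesystems typically grow
 * a bio one block at a time and submit when it fills up. "bsize" and
 * "poff" are placeholders for the block size and the in-page offset.
 *
 *	if (bio_add_page(bio, page, bsize, poff) != bsize) {
 *		submit_bio(bio);		// bio was full
 *		bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 *		...				// re-init sector, op, dev
 *		bio_add_page(bio, page, bsize, poff);
 *	}
 */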

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The pages
 * will have to be released using put_page() when done. For a multi-segment
 * *iter, this function only adds pages from the next non-empty segment of
 * the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far
	 * as possible so that we can start filling biovecs from the
	 * beginning without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
			return -EINVAL;
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The pages
 * will have to be released using put_page() when done. The function tries,
 * but does not guarantee, to pin as many pages as fit into the bio, or are
 * requested in *iter, whichever is smaller. If MM encounters an error
 * pinning the requested pages, it stops. Error is returned only if 0 pages
 * could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short orig_vcnt = bio->bi_vcnt;

	do {
		int ret = __bio_iov_iter_get_pages(bio, iter);

		if (unlikely(ret))
			return bio->bi_vcnt > orig_vcnt ? 0 : ret;

	} while (iov_iter_count(iter) && !bio_full(bio));

	return 0;
}
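
/*
 * Illustrative sketch (not part of this file): a direct-I/O path fills
 * bios straight from the caller's iov_iter until the iterator is drained.
 * "dio_bio_alloc" is a hypothetical helper; submission details are elided.
 *
 *	int ret = 0;
 *
 *	while (iov_iter_count(iter)) {
 *		struct bio *bio = dio_bio_alloc(iter);	// hypothetical
 *
 *		ret = bio_iov_iter_get_pages(bio, iter);
 *		if (ret) {
 *			bio_put(bio);
 *			break;
 *		}
 *		submit_bio(bio);
 *	}
 */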

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error
 * from bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on his own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
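
/*
 * Illustrative sketch (not part of this file): a synchronous read of one
 * page. bdev, sector and page are caller-supplied placeholders; note the
 * explicit bio_put(), since submit_bio_wait() does not consume the ref.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */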

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset
 * will be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes.
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);
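
/*
 * Illustrative sketch (not part of this file): a bounce path copies the
 * payload between an original bio and one backed by private pages.
 * "bounce" is a placeholder bio whose vecs were filled by the caller.
 *
 *	bio_copy_data(bounce, bio);	// write path: snapshot the payload
 *	...				// perform the I/O on @bounce
 *	bio_copy_data(bio, bounce);	// read path: copy the results back
 */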

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios
 * to another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that
 * is, copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for
 * lists of bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(sizeof(struct bio_map_data) +
		      sizeof(struct iovec) * data->nr_segs, gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user_iov() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q:		destination block queue
 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;
	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

/**
 *	bio_map_user_iov - map user iovec into bio
 *	@q:		the struct request_queue for the bio
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	struct bio *bio;
	int ret;
	struct bio_vec *bvec;

	if (!iov_iter_count(iter))
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				unsigned short prev_bi_vcnt = bio->bi_vcnt;

				if (n > bytes)
					n = bytes;

				if (!bio_add_pc_page(q, bio, page, n, offs))
					break;

				/*
				 * check if vector was merged with previous
				 * without actually having to check the offset.
				 */
				if (bio->bi_vcnt == prev_bi_vcnt)
					put_page(page);

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	bio_for_each_segment_all(bvec, bio, j) {
		put_page(bvec->bv_page);
	}
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called
 *	from process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is
 * to mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into
 * hugetlb pages.  The logic in here doesn't really work right for compound
 * pages because the VM does not uniformly chase down the head page in all
 * cases.  But dirtiness of compound pages is pretty meaningless anyway: the
 * VM doesn't handle them at all.  So we skip compound pages here at an
 * early stage.
 *
 * Note that this code is very hard to test under normal circumstances
 * because direct-IO pins the pages with get_user_pages(), so page reclaim
 * will not normally clean them.  But other code (eg, flusher threads) could
 * clean the pages if they are mapped pagecache in the kernel.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test
 * the deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		put_page(bvec->bv_page);
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still
 * dirty.  If they are, then fine.  If, however, some pages are clean then
 * they must have been written out during the direct-IO read.  Since
 * set_page_dirty() cannot be run from interrupt context, the BIO is
 * deferred to a workqueue that re-dirties the pages and releases the BIO
 * in process context.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

void update_io_ticks(struct hd_struct *part, unsigned long now)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
			__part_stat_add(part, io_ticks, 1);
		}
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();

	update_io_ticks(part, jiffies);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_inc_in_flight(q, part, op_is_write(op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(struct request_queue *q, int req_op,
			 struct hd_struct *part, unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long duration = now - start_time;
	const int sgrp = op_stat_group(req_op);

	part_stat_lock();

	update_io_ticks(part, now);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_add(part, time_in_queue, duration);
	part_dec_in_flight(q, part, op_is_write(req_op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
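
/*
 * Illustrative sketch (not part of this file): a bio-based driver brackets
 * each bio with the two accounting helpers; "disk" and start_time are
 * caller-supplied placeholders.
 *
 *	unsigned long start_time = jiffies;
 *
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	...				// perform the I/O
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start_time);
 */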

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_disk)
		rq_qos_done_bio(bio->bi_disk->queue, bio);

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio,
					 blk_status_to_errno(bio->bi_status));
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
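
/*
 * Illustrative sketch (not part of this file): a typical ->bi_end_io
 * handler records the status, signals its owner and drops the bio
 * reference. "struct my_request" is a hypothetical private structure
 * holding a struct completion and an error field.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_request *rq = bio->bi_private;
 *
 *		rq->error = blk_status_to_errno(bio->bi_status);
 *		complete(&rq->done);
 *		bio_put(bio);
 *	}
 */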

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start
 * of @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
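
/*
 * Illustrative sketch (not part of this file): the split/chain pattern a
 * stacking driver uses when a bio crosses a boundary it cannot service in
 * one piece. "max_sectors" and "bs" are placeholders.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);		// parent completes last
 *		generic_make_request(bio);	// requeue the remainder
 *		bio = split;			// service the front piece
 *	}
 */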

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio that we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);

}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *              and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be
 *    allocated for allocating iovecs.  This pool is not needed e.g. for
 *    bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be
 *    used to dispatch queued requests when the mempool runs out of space.
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (!(flags & BIOSET_NEED_RESCUER))
		return 0;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
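
/*
 * Illustrative sketch (not part of this file): a driver embedding a
 * per-I/O context in front of the bio via front_pad. "struct my_io" is
 * hypothetical; the bio member must come last, per the note above.
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		// must be the last member
 *	};
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * A later bio_alloc_bioset(GFP_NOIO, n, &my_bio_set) then yields a bio for
 * which container_of(bio, struct my_io, bio) is the private context.
 */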

/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_disassociate_blkg - puts back the blkg reference if associated
 * @bio: target bio
 *
 * Helper to disassociate the blkg from @bio if a blkg is associated.
 */
void bio_disassociate_blkg(struct bio *bio)
{
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
}
EXPORT_SYMBOL_GPL(bio_disassociate_blkg);

/**
 * __bio_associate_blkg - associate a bio with the a blkg
 * @bio: target bio
 * @blkg: the blkg to associate
 *
 * This tries to associate @bio with the specified @blkg.  Association
 * failure is handled by walking up the blkg tree.  Therefore, the blkg
 * associated can be anything between @blkg and the root_blkg.  This
 * situation only happens when a cgroup is dying and then the remaining
 * bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the @blkg and will be released when @bio is
 * freed.
 */
static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
	bio_disassociate_blkg(bio);

	bio->bi_blkg = blkg_tryget_closest(blkg);
}

/**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  This falls back to the queue's root_blkg if
 * the association fails with the css.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	struct request_queue *q = bio->bi_disk->queue;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	if (!css || !css->parent)
		blkg = q->root_blkg;
	else
		blkg = blkg_lookup_create(css_to_blkcg(css), q);

	__bio_associate_blkg(bio, blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

#ifdef CONFIG_MEMCG
/**
 * bio_associate_blkg_from_page - associate a bio with the page's blkg
 * @bio: target bio
 * @page: the page to lookup the blkcg from
 *
 * Associate @bio with the blkg from @page's owning memcg.  This works like
 * every other associate function wrt references.
 */
void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;

	if (!page->mem_cgroup)
		return;

	rcu_read_lock();

	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
#endif /* CONFIG_MEMCG */

/**
 * bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 *
 * Associate @bio with the blkg found from the bio's css and request_queue.
 * If a blkg is already associated, the css is reused and association is
 * redone as the request_queue may have changed.
 */
void bio_associate_blkg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = &bio_blkcg(bio)->css;
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	rcu_read_lock();

	if (src->bi_blkg)
		__bio_associate_blkg(dst, src->bi_blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
			    GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);