/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
22#include <linux/uio.h>
23#include <linux/iocontext.h>
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/export.h>
28#include <linux/mempool.h>
29#include <linux/workqueue.h>
30#include <linux/cgroup.h>
31#include <linux/blk-cgroup.h>
32
33#include <trace/events/block.h>
34#include "blk.h"
35#include "blk-rq-qos.h"
36
/*
 * Number of bio_vecs embedded inline in struct bio itself, so that small
 * bios need only a single mempool allocation instead of two.
 */
41#define BIO_INLINE_VECS 4
42
/*
 * If you change this list, also change bvec_alloc or things will break
 * badly! The pool sizes must match the case ranges used there.
 */
48#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
49static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
50 BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
51};
52#undef BV
53
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
58struct bio_set fs_bio_set;
59EXPORT_SYMBOL(fs_bio_set);
60
/*
 * Our slab pool management
 */
64struct bio_slab {
65 struct kmem_cache *slab;
66 unsigned int slab_ref;
67 unsigned int slab_size;
68 char name[8];
69};
70static DEFINE_MUTEX(bio_slab_lock);
71static struct bio_slab *bio_slabs;
72static unsigned int bio_slab_nr, bio_slab_max;
73
74static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
75{
76 unsigned int sz = sizeof(struct bio) + extra_size;
77 struct kmem_cache *slab = NULL;
78 struct bio_slab *bslab, *new_bio_slabs;
79 unsigned int new_bio_slab_max;
80 unsigned int i, entry = -1;
81
82 mutex_lock(&bio_slab_lock);
83
84 i = 0;
85 while (i < bio_slab_nr) {
86 bslab = &bio_slabs[i];
87
88 if (!bslab->slab && entry == -1)
89 entry = i;
90 else if (bslab->slab_size == sz) {
91 slab = bslab->slab;
92 bslab->slab_ref++;
93 break;
94 }
95 i++;
96 }
97
98 if (slab)
99 goto out_unlock;
100
101 if (bio_slab_nr == bio_slab_max && entry == -1) {
102 new_bio_slab_max = bio_slab_max << 1;
103 new_bio_slabs = krealloc(bio_slabs,
104 new_bio_slab_max * sizeof(struct bio_slab),
105 GFP_KERNEL);
106 if (!new_bio_slabs)
107 goto out_unlock;
108 bio_slab_max = new_bio_slab_max;
109 bio_slabs = new_bio_slabs;
110 }
111 if (entry == -1)
112 entry = bio_slab_nr++;
113
114 bslab = &bio_slabs[entry];
115
116 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
117 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
118 SLAB_HWCACHE_ALIGN, NULL);
119 if (!slab)
120 goto out_unlock;
121
122 bslab->slab = slab;
123 bslab->slab_ref = 1;
124 bslab->slab_size = sz;
125out_unlock:
126 mutex_unlock(&bio_slab_lock);
127 return slab;
128}
129
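/*
 * Drop a reference on the slab that backs @bs's bio allocations; the slab
 * cache is destroyed once the last bio_set using this size lets go of it.
 */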
130static void bio_put_slab(struct bio_set *bs)
131{
132 struct bio_slab *bslab = NULL;
133 unsigned int i;
134
135 mutex_lock(&bio_slab_lock);
136
137 for (i = 0; i < bio_slab_nr; i++) {
138 if (bs->bio_slab == bio_slabs[i].slab) {
139 bslab = &bio_slabs[i];
140 break;
141 }
142 }
143
144 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
145 goto out;
146
147 WARN_ON(!bslab->slab_ref);
148
149 if (--bslab->slab_ref)
150 goto out;
151
152 kmem_cache_destroy(bslab->slab);
153 bslab->slab = NULL;
154
155out:
156 mutex_unlock(&bio_slab_lock);
157}
158
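/*
 * Translate a 1-based biovec pool index (as stored in the bio's flags) back
 * into the number of vectors that pool provides.
 */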
159unsigned int bvec_nr_vecs(unsigned short idx)
160{
161 return bvec_slabs[--idx].nr_vecs;
162}
163
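/*
 * Return a biovec array to wherever it came from: the mempool for the
 * largest pool index, or the matching slab cache otherwise. @idx is the
 * 1-based pool index; 0 means the vectors were inline and nothing is freed.
 */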
164void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
165{
166 if (!idx)
167 return;
168 idx--;
169
170 BIO_BUG_ON(idx >= BVEC_POOL_NR);
171
172 if (idx == BVEC_POOL_MAX) {
173 mempool_free(bv, pool);
174 } else {
175 struct biovec_slab *bvs = bvec_slabs + idx;
176
177 kmem_cache_free(bvs->slab, bv);
178 }
179}
180
181struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
182 mempool_t *pool)
183{
184 struct bio_vec *bvl;
185
	/*
	 * Map the requested vector count onto one of the bvec_slabs pools;
	 * see the bvec_slabs[] table above.
	 */
189 switch (nr) {
190 case 1:
191 *idx = 0;
192 break;
193 case 2 ... 4:
194 *idx = 1;
195 break;
196 case 5 ... 16:
197 *idx = 2;
198 break;
199 case 17 ... 64:
200 *idx = 3;
201 break;
202 case 65 ... 128:
203 *idx = 4;
204 break;
205 case 129 ... BIO_MAX_PAGES:
206 *idx = 5;
207 break;
208 default:
209 return NULL;
210 }
211
	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * largest (BIO_MAX_PAGES) pool is mempool backed; the others come
	 * straight from their slab caches.
	 */
216 if (*idx == BVEC_POOL_MAX) {
217fallback:
218 bvl = mempool_alloc(pool, gfp_mask);
219 } else {
220 struct biovec_slab *bvs = bvec_slabs + *idx;
221 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
222
223
224
225
226
227
228 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
229
230
231
232
233
234 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
235 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
236 *idx = BVEC_POOL_MAX;
237 goto fallback;
238 }
239 }
240
241 (*idx)++;
242 return bvl;
243}
244
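/*
 * Release per-bio state (cgroup and io context associations) that is not
 * freed along with the bio memory itself.
 */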
245void bio_uninit(struct bio *bio)
246{
247 bio_disassociate_task(bio);
248}
249EXPORT_SYMBOL(bio_uninit);
250
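/*
 * Free a bio back to its owning bio_set, undoing any front padding first,
 * or kfree() it if it was allocated with bio_kmalloc() (no bi_pool).
 */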
251static void bio_free(struct bio *bio)
252{
253 struct bio_set *bs = bio->bi_pool;
254 void *p;
255
256 bio_uninit(bio);
257
258 if (bs) {
259 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
		/*
		 * If the bio_set uses front padding, step back to the start
		 * of the original allocation before returning it to the pool.
		 */
264 p = bio;
265 p -= bs->front_pad;
266
267 mempool_free(p, &bs->bio_pool);
268 } else {
269
270 kfree(bio);
271 }
272}
273
/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
279void bio_init(struct bio *bio, struct bio_vec *table,
280 unsigned short max_vecs)
281{
282 memset(bio, 0, sizeof(*bio));
283 atomic_set(&bio->__bi_remaining, 1);
284 atomic_set(&bio->__bi_cnt, 1);
285
286 bio->bi_io_vec = table;
287 bio->bi_max_vecs = max_vecs;
288}
289EXPORT_SYMBOL(bio_init);
290
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset().
 */
301void bio_reset(struct bio *bio)
302{
303 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
304
305 bio_uninit(bio);
306
307 memset(bio, 0, BIO_RESET_BYTES);
308 bio->bi_flags = flags;
309 atomic_set(&bio->__bi_remaining, 1);
310}
311EXPORT_SYMBOL(bio_reset);
312
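/*
 * Complete one link in a bio chain: propagate the child's status to the
 * parent (unless the parent already failed), drop the child, and hand the
 * parent back to the caller for completion.
 */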
313static struct bio *__bio_chain_endio(struct bio *bio)
314{
315 struct bio *parent = bio->bi_private;
316
317 if (!parent->bi_status)
318 parent->bi_status = bio->bi_status;
319 bio_put(bio);
320 return parent;
321}
322
323static void bio_chain_endio(struct bio *bio)
324{
325 bio_endio(__bio_chain_endio(bio));
326}
327
/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
339void bio_chain(struct bio *bio, struct bio *parent)
340{
341 BUG_ON(bio->bi_private || bio->bi_end_io);
342
343 bio->bi_private = parent;
344 bio->bi_end_io = bio_chain_endio;
345 bio_inc_remaining(parent);
346}
347EXPORT_SYMBOL(bio_chain);
348
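/*
 * Rescuer work item: resubmit bios that were punted off the current task's
 * bio_list so that mempool allocations in bio_alloc_bioset() can make
 * forward progress.
 */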
349static void bio_alloc_rescue(struct work_struct *work)
350{
351 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
352 struct bio *bio;
353
354 while (1) {
355 spin_lock(&bs->rescue_lock);
356 bio = bio_list_pop(&bs->rescue_list);
357 spin_unlock(&bs->rescue_lock);
358
359 if (!bio)
360 break;
361
362 generic_make_request(bio);
363 }
364}
365
366static void punt_bios_to_rescuer(struct bio_set *bs)
367{
368 struct bio_list punt, nopunt;
369 struct bio *bio;
370
371 if (WARN_ON_ONCE(!bs->rescue_workqueue))
372 return;
373
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * a lower device we could deadlock if it were processed with this
	 * bio_set's rescuer.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying
	 * to remove from the middle of the list:
	 */
384 bio_list_init(&punt);
385 bio_list_init(&nopunt);
386
	while ((bio = bio_list_pop(&current->bio_list[0])))
388 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
389 current->bio_list[0] = nopunt;
390
391 bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
393 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
394 current->bio_list[1] = nopunt;
395
396 spin_lock(&bs->rescue_lock);
397 bio_list_merge(&bs->rescue_list, &punt);
398 spin_unlock(&bs->rescue_lock);
399
400 queue_work(bs->rescue_workqueue, &bs->rescue_work);
401}
402
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation
 *   is backed by the @bs's mempool, guaranteeing it will never fail while
 *   %__GFP_DIRECT_RECLAIM is set.
 *
 *   When @bs is not NULL and %__GFP_DIRECT_RECLAIM is set, bio_alloc will
 *   always be able to allocate a bio because of the mempool guarantees. To
 *   make this work, callers must never allocate more than 1 bio at a time
 *   from this pool: callers that need another bio must submit the previously
 *   allocated one first, or risk deadlock under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   To work around this, queued bios on current->bio_list are punted to the
 *   bio_set's rescuer workqueue before blocking in the mempool allocation,
 *   so that they can complete and the allocation can make forward progress.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
438struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
439 struct bio_set *bs)
440{
441 gfp_t saved_gfp = gfp_mask;
442 unsigned front_pad;
443 unsigned inline_vecs;
444 struct bio_vec *bvl = NULL;
445 struct bio *bio;
446 void *p;
447
448 if (!bs) {
449 if (nr_iovecs > UIO_MAXIOV)
450 return NULL;
451
452 p = kmalloc(sizeof(struct bio) +
453 nr_iovecs * sizeof(struct bio_vec),
454 gfp_mask);
455 front_pad = 0;
456 inline_vecs = nr_iovecs;
457 } else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
459 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
460 nr_iovecs > 0))
461 return NULL;
462
		/*
		 * generic_make_request() converts recursion to iteration;
		 * this means that if we're running beneath it, any bios we
		 * allocate and submit will not actually be submitted (and
		 * thus freed) until after we return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running under
		 * generic_make_request() (for example, a stacking driver
		 * splitting bios could exhaust the mempool's reserve).
		 *
		 * We solve this, and guarantee forward progress, with a
		 * rescuer workqueue per bio_set. If there are bios queued on
		 * current->bio_list, we first try the allocation without
		 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios to
		 * the rescuer workqueue and retry with the original gfp mask.
		 */
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
486 bs->rescue_workqueue)
487 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
488
489 p = mempool_alloc(&bs->bio_pool, gfp_mask);
490 if (!p && gfp_mask != saved_gfp) {
491 punt_bios_to_rescuer(bs);
492 gfp_mask = saved_gfp;
493 p = mempool_alloc(&bs->bio_pool, gfp_mask);
494 }
495
496 front_pad = bs->front_pad;
497 inline_vecs = BIO_INLINE_VECS;
498 }
499
500 if (unlikely(!p))
501 return NULL;
502
503 bio = p + front_pad;
504 bio_init(bio, NULL, 0);
505
506 if (nr_iovecs > inline_vecs) {
507 unsigned long idx = 0;
508
509 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
510 if (!bvl && gfp_mask != saved_gfp) {
511 punt_bios_to_rescuer(bs);
512 gfp_mask = saved_gfp;
513 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
514 }
515
516 if (unlikely(!bvl))
517 goto err_free;
518
519 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
520 } else if (nr_iovecs) {
521 bvl = bio->bi_inline_vecs;
522 }
523
524 bio->bi_pool = bs;
525 bio->bi_max_vecs = nr_iovecs;
526 bio->bi_io_vec = bvl;
527 return bio;
528
529err_free:
530 mempool_free(p, &bs->bio_pool);
531 return NULL;
532}
533EXPORT_SYMBOL(bio_alloc_bioset);
534
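/*
 * Zero the data pages covered by @bio from iterator position @start onwards.
 */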
535void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
536{
537 unsigned long flags;
538 struct bio_vec bv;
539 struct bvec_iter iter;
540
541 __bio_for_each_segment(bv, bio, iter, start) {
542 char *data = bvec_kmap_irq(&bv, &flags);
543 memset(data, 0, bv.bv_len);
544 flush_dcache_page(bv.bv_page);
545 bvec_kunmap_irq(data, &flags);
546 }
547}
548EXPORT_SYMBOL(zero_fill_bio_iter);
549
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
558void bio_put(struct bio *bio)
559{
560 if (!bio_flagged(bio, BIO_REFFED))
561 bio_free(bio);
562 else {
563 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
564
565
566
567
568 if (atomic_dec_and_test(&bio->__bi_cnt))
569 bio_free(bio);
570 }
571}
572EXPORT_SYMBOL(bio_put);
573
574inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
575{
576 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
577 blk_recount_segments(q, bio);
578
579 return bio->bi_phys_segments;
580}
581EXPORT_SYMBOL(bio_phys_segments);
582
/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not the actual data
 * it points to. Reference count of returned bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
594void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
595{
596 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
597
598
599
600
601
602 bio->bi_disk = bio_src->bi_disk;
603 bio->bi_partno = bio_src->bi_partno;
604 bio_set_flag(bio, BIO_CLONED);
605 if (bio_flagged(bio_src, BIO_THROTTLED))
606 bio_set_flag(bio, BIO_THROTTLED);
607 bio->bi_opf = bio_src->bi_opf;
608 bio->bi_write_hint = bio_src->bi_write_hint;
609 bio->bi_iter = bio_src->bi_iter;
610 bio->bi_io_vec = bio_src->bi_io_vec;
611
612 bio_clone_blkcg_association(bio, bio_src);
613}
614EXPORT_SYMBOL(__bio_clone_fast);
615
/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
624struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
625{
626 struct bio *b;
627
628 b = bio_alloc_bioset(gfp_mask, 0, bs);
629 if (!b)
630 return NULL;
631
632 __bio_clone_fast(b, bio);
633
634 if (bio_integrity(bio)) {
635 int ret;
636
637 ret = bio_integrity_clone(b, bio, gfp_mask);
638
639 if (ret < 0) {
640 bio_put(b);
641 return NULL;
642 }
643 }
644
645 return b;
646}
647EXPORT_SYMBOL(bio_clone_fast);
648
/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or the target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
664int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
665 *page, unsigned int len, unsigned int offset)
666{
667 int retried_segments = 0;
668 struct bio_vec *bvec;
669
670
671
672
673 if (unlikely(bio_flagged(bio, BIO_CLONED)))
674 return 0;
675
676 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
677 return 0;
678
679
680
681
682
683
684 if (bio->bi_vcnt > 0) {
685 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
686
687 if (page == prev->bv_page &&
688 offset == prev->bv_offset + prev->bv_len) {
689 prev->bv_len += len;
690 bio->bi_iter.bi_size += len;
691 goto done;
692 }
693
694
695
696
697
698 if (bvec_gap_to_prev(q, prev, offset))
699 return 0;
700 }
701
702 if (bio_full(bio))
703 return 0;
704
705
706
707
708
709 bvec = &bio->bi_io_vec[bio->bi_vcnt];
710 bvec->bv_page = page;
711 bvec->bv_len = len;
712 bvec->bv_offset = offset;
713 bio->bi_vcnt++;
714 bio->bi_phys_segments++;
715 bio->bi_iter.bi_size += len;
716
717
718
719
720
721
722 while (bio->bi_phys_segments > queue_max_segments(q)) {
723
724 if (retried_segments)
725 goto failed;
726
727 retried_segments = 1;
728 blk_recount_segments(q, bio);
729 }
730
731
732 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
733 bio_clear_flag(bio, BIO_SEG_VALID);
734
735 done:
736 return len;
737
738 failed:
739 bvec->bv_page = NULL;
740 bvec->bv_len = 0;
741 bvec->bv_offset = 0;
742 bio->bi_vcnt--;
743 bio->bi_iter.bi_size -= len;
744 blk_recount_segments(q, bio);
745 return 0;
746}
747EXPORT_SYMBOL(bio_add_pc_page);
748
/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Return %true on success or %false on failure.
 */
762bool __bio_try_merge_page(struct bio *bio, struct page *page,
763 unsigned int len, unsigned int off)
764{
765 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
766 return false;
767
768 if (bio->bi_vcnt > 0) {
769 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
770
771 if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
772 bv->bv_len += len;
773 bio->bi_iter.bi_size += len;
774 return true;
775 }
776 }
777 return false;
778}
779EXPORT_SYMBOL_GPL(__bio_try_merge_page);
780
/**
 * __bio_add_page - add page to a bio in a new segment
 * @bio: destination bio
 * @page: page to add
 * @len: length of the data to add
 * @off: offset of the data in @page
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must
 * ensure that @bio has space for another bvec.
 */
791void __bio_add_page(struct bio *bio, struct page *page,
792 unsigned int len, unsigned int off)
793{
794 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
795
796 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
797 WARN_ON_ONCE(bio_full(bio));
798
799 bv->bv_page = page;
800 bv->bv_offset = off;
801 bv->bv_len = len;
802
803 bio->bi_iter.bi_size += len;
804 bio->bi_vcnt++;
805}
806EXPORT_SYMBOL_GPL(__bio_add_page);
807
/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
818int bio_add_page(struct bio *bio, struct page *page,
819 unsigned int len, unsigned int offset)
820{
821 if (!__bio_try_merge_page(bio, page, len, offset)) {
822 if (bio_full(bio))
823 return 0;
824 __bio_add_page(bio, page, len, offset);
825 }
826 return len;
827}
828EXPORT_SYMBOL(bio_add_page);
829
/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
840static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
841{
842 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
843 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
844 struct page **pages = (struct page **)bv;
845 size_t offset;
846 ssize_t size;
847
848 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
849 if (unlikely(size <= 0))
850 return size ? size : -EFAULT;
851 idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array.  Because the bio_vecs are larger than the
	 * page pointers by definition this will always work.  But it also
	 * means we can't use bio_add_page, so any changes to its semantics
	 * need to be reflected here as well.
	 */
861 bio->bi_iter.bi_size += size;
862 bio->bi_vcnt += nr_pages;
863
864 while (idx--) {
865 bv[idx].bv_page = pages[idx];
866 bv[idx].bv_len = PAGE_SIZE;
867 bv[idx].bv_offset = 0;
868 }
869
870 bv[0].bv_offset += offset;
871 bv[0].bv_len -= offset;
872 bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
873
874 iov_iter_advance(iter, size);
875 return 0;
876}
877
/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in *iter, whichever is smaller.
 * An error is returned only if no pages could be pinned at all.
 */
890int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
891{
892 unsigned short orig_vcnt = bio->bi_vcnt;
893
894 do {
895 int ret = __bio_iov_iter_get_pages(bio, iter);
896
897 if (unlikely(ret))
898 return bio->bi_vcnt > orig_vcnt ? 0 : ret;
899
900 } while (iov_iter_count(iter) && !bio_full(bio));
901
902 return 0;
903}
904EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
905
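/* Completion callback for submit_bio_wait(): wake up the waiting task. */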
906static void submit_bio_wait_endio(struct bio *bio)
907{
908 complete(bio->bi_private);
909}
910
/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error
 * from ->bi_status after the I/O has completed.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference itself.
 */
922int submit_bio_wait(struct bio *bio)
923{
924 DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
925
926 bio->bi_private = &done;
927 bio->bi_end_io = submit_bio_wait_endio;
928 bio->bi_opf |= REQ_SYNC;
929 submit_bio(bio);
930 wait_for_completion_io(&done);
931
932 return blk_status_to_errno(bio->bi_status);
933}
934EXPORT_SYMBOL(submit_bio_wait);
935
936
937
938
939
940
941
942
943
944
945
946
947void bio_advance(struct bio *bio, unsigned bytes)
948{
949 if (bio_integrity(bio))
950 bio_integrity_advance(bio, bytes);
951
952 bio_advance_iter(bio, &bio->bi_iter, bytes);
953}
954EXPORT_SYMBOL(bio_advance);
955
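/*
 * Copy data between two bios starting at the given iterators, one bvec at a
 * time, until either iterator is exhausted.
 */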
956void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
957 struct bio *src, struct bvec_iter *src_iter)
958{
959 struct bio_vec src_bv, dst_bv;
960 void *src_p, *dst_p;
961 unsigned bytes;
962
963 while (src_iter->bi_size && dst_iter->bi_size) {
964 src_bv = bio_iter_iovec(src, *src_iter);
965 dst_bv = bio_iter_iovec(dst, *dst_iter);
966
967 bytes = min(src_bv.bv_len, dst_bv.bv_len);
968
969 src_p = kmap_atomic(src_bv.bv_page);
970 dst_p = kmap_atomic(dst_bv.bv_page);
971
972 memcpy(dst_p + dst_bv.bv_offset,
973 src_p + src_bv.bv_offset,
974 bytes);
975
976 kunmap_atomic(dst_p);
977 kunmap_atomic(src_p);
978
979 flush_dcache_page(dst_bv.bv_page);
980
981 bio_advance_iter(src, src_iter, bytes);
982 bio_advance_iter(dst, dst_iter, bytes);
983 }
984}
985EXPORT_SYMBOL(bio_copy_data_iter);
986
987
988
989
990
991
992
993
994
995void bio_copy_data(struct bio *dst, struct bio *src)
996{
997 struct bvec_iter src_iter = src->bi_iter;
998 struct bvec_iter dst_iter = dst->bi_iter;
999
1000 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1001}
1002EXPORT_SYMBOL(bio_copy_data);
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014void bio_list_copy_data(struct bio *dst, struct bio *src)
1015{
1016 struct bvec_iter src_iter = src->bi_iter;
1017 struct bvec_iter dst_iter = dst->bi_iter;
1018
1019 while (1) {
1020 if (!src_iter.bi_size) {
1021 src = src->bi_next;
1022 if (!src)
1023 break;
1024
1025 src_iter = src->bi_iter;
1026 }
1027
1028 if (!dst_iter.bi_size) {
1029 dst = dst->bi_next;
1030 if (!dst)
1031 break;
1032
1033 dst_iter = dst->bi_iter;
1034 }
1035
1036 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1037 }
1038}
1039EXPORT_SYMBOL(bio_list_copy_data);
1040
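/*
 * Private state for bio_copy_user_iov(): a deep copy of the caller's iovec
 * array plus a flag recording whether the bounce pages belong to us and
 * therefore must be freed in bio_uncopy_user().
 */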
1041struct bio_map_data {
1042 int is_our_pages;
1043 struct iov_iter iter;
1044 struct iovec iov[];
1045};
1046
1047static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1048 gfp_t gfp_mask)
1049{
1050 struct bio_map_data *bmd;
1051 if (data->nr_segs > UIO_MAXIOV)
1052 return NULL;
1053
1054 bmd = kmalloc(sizeof(struct bio_map_data) +
1055 sizeof(struct iovec) * data->nr_segs, gfp_mask);
1056 if (!bmd)
1057 return NULL;
1058 memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1059 bmd->iter = *data;
1060 bmd->iter.iov = bmd->iov;
1061 return bmd;
1062}
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1073{
1074 int i;
1075 struct bio_vec *bvec;
1076
1077 bio_for_each_segment_all(bvec, bio, i) {
1078 ssize_t ret;
1079
1080 ret = copy_page_from_iter(bvec->bv_page,
1081 bvec->bv_offset,
1082 bvec->bv_len,
1083 iter);
1084
1085 if (!iov_iter_count(iter))
1086 break;
1087
1088 if (ret < bvec->bv_len)
1089 return -EFAULT;
1090 }
1091
1092 return 0;
1093}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1104{
1105 int i;
1106 struct bio_vec *bvec;
1107
1108 bio_for_each_segment_all(bvec, bio, i) {
1109 ssize_t ret;
1110
1111 ret = copy_page_to_iter(bvec->bv_page,
1112 bvec->bv_offset,
1113 bvec->bv_len,
1114 &iter);
1115
1116 if (!iov_iter_count(&iter))
1117 break;
1118
1119 if (ret < bvec->bv_len)
1120 return -EFAULT;
1121 }
1122
1123 return 0;
1124}
1125
1126void bio_free_pages(struct bio *bio)
1127{
1128 struct bio_vec *bvec;
1129 int i;
1130
1131 bio_for_each_segment_all(bvec, bio, i)
1132 __free_page(bvec->bv_page);
1133}
1134EXPORT_SYMBOL(bio_free_pages);
1135
/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
1143int bio_uncopy_user(struct bio *bio)
1144{
1145 struct bio_map_data *bmd = bio->bi_private;
1146 int ret = 0;
1147
1148 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1149
1150
1151
1152
1153
1154 if (!current->mm)
1155 ret = -EINTR;
1156 else if (bio_data_dir(bio) == READ)
1157 ret = bio_copy_to_iter(bio, bmd->iter);
1158 if (bmd->is_our_pages)
1159 bio_free_pages(bio);
1160 }
1161 kfree(bmd);
1162 bio_put(bio);
1163 return ret;
1164}
1165
/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. A matching bio_uncopy_user() must
 * be issued on IO completion.
 */
1177struct bio *bio_copy_user_iov(struct request_queue *q,
1178 struct rq_map_data *map_data,
1179 struct iov_iter *iter,
1180 gfp_t gfp_mask)
1181{
1182 struct bio_map_data *bmd;
1183 struct page *page;
1184 struct bio *bio;
1185 int i = 0, ret;
1186 int nr_pages;
1187 unsigned int len = iter->count;
1188 unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1189
1190 bmd = bio_alloc_map_data(iter, gfp_mask);
1191 if (!bmd)
1192 return ERR_PTR(-ENOMEM);
1193
1194
1195
1196
1197
1198
1199 bmd->is_our_pages = map_data ? 0 : 1;
1200
1201 nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1202 if (nr_pages > BIO_MAX_PAGES)
1203 nr_pages = BIO_MAX_PAGES;
1204
1205 ret = -ENOMEM;
1206 bio = bio_kmalloc(gfp_mask, nr_pages);
1207 if (!bio)
1208 goto out_bmd;
1209
1210 ret = 0;
1211
1212 if (map_data) {
1213 nr_pages = 1 << map_data->page_order;
1214 i = map_data->offset / PAGE_SIZE;
1215 }
1216 while (len) {
1217 unsigned int bytes = PAGE_SIZE;
1218
1219 bytes -= offset;
1220
1221 if (bytes > len)
1222 bytes = len;
1223
1224 if (map_data) {
1225 if (i == map_data->nr_entries * nr_pages) {
1226 ret = -ENOMEM;
1227 break;
1228 }
1229
1230 page = map_data->pages[i / nr_pages];
1231 page += (i % nr_pages);
1232
1233 i++;
1234 } else {
1235 page = alloc_page(q->bounce_gfp | gfp_mask);
1236 if (!page) {
1237 ret = -ENOMEM;
1238 break;
1239 }
1240 }
1241
1242 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1243 break;
1244
1245 len -= bytes;
1246 offset = 0;
1247 }
1248
1249 if (ret)
1250 goto cleanup;
1251
1252 if (map_data)
1253 map_data->offset += bio->bi_iter.bi_size;
1254
1255
1256
1257
1258 if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
1259 (map_data && map_data->from_user)) {
1260 ret = bio_copy_from_iter(bio, iter);
1261 if (ret)
1262 goto cleanup;
1263 } else {
1264 iov_iter_advance(iter, bio->bi_iter.bi_size);
1265 }
1266
1267 bio->bi_private = bmd;
1268 if (map_data && map_data->null_mapped)
1269 bio_set_flag(bio, BIO_NULL_MAPPED);
1270 return bio;
1271cleanup:
1272 if (!map_data)
1273 bio_free_pages(bio);
1274 bio_put(bio);
1275out_bmd:
1276 kfree(bmd);
1277 return ERR_PTR(ret);
1278}
1279
/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
1289struct bio *bio_map_user_iov(struct request_queue *q,
1290 struct iov_iter *iter,
1291 gfp_t gfp_mask)
1292{
1293 int j;
1294 struct bio *bio;
1295 int ret;
1296 struct bio_vec *bvec;
1297
1298 if (!iov_iter_count(iter))
1299 return ERR_PTR(-EINVAL);
1300
1301 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1302 if (!bio)
1303 return ERR_PTR(-ENOMEM);
1304
1305 while (iov_iter_count(iter)) {
1306 struct page **pages;
1307 ssize_t bytes;
1308 size_t offs, added = 0;
1309 int npages;
1310
1311 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1312 if (unlikely(bytes <= 0)) {
1313 ret = bytes ? bytes : -EFAULT;
1314 goto out_unmap;
1315 }
1316
1317 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1318
1319 if (unlikely(offs & queue_dma_alignment(q))) {
1320 ret = -EINVAL;
1321 j = 0;
1322 } else {
1323 for (j = 0; j < npages; j++) {
1324 struct page *page = pages[j];
1325 unsigned int n = PAGE_SIZE - offs;
1326 unsigned short prev_bi_vcnt = bio->bi_vcnt;
1327
1328 if (n > bytes)
1329 n = bytes;
1330
1331 if (!bio_add_pc_page(q, bio, page, n, offs))
1332 break;
1333
1334
1335
1336
1337
1338 if (bio->bi_vcnt == prev_bi_vcnt)
1339 put_page(page);
1340
1341 added += n;
1342 bytes -= n;
1343 offs = 0;
1344 }
1345 iov_iter_advance(iter, added);
1346 }
1347
1348
1349
1350 while (j < npages)
1351 put_page(pages[j++]);
1352 kvfree(pages);
1353
1354 if (bytes)
1355 break;
1356 }
1357
1358 bio_set_flag(bio, BIO_USER_MAPPED);
1359
1360
1361
1362
1363
1364
1365
1366 bio_get(bio);
1367 return bio;
1368
1369 out_unmap:
1370 bio_for_each_segment_all(bvec, bio, j) {
1371 put_page(bvec->bv_page);
1372 }
1373 bio_put(bio);
1374 return ERR_PTR(ret);
1375}
1376
1377static void __bio_unmap_user(struct bio *bio)
1378{
1379 struct bio_vec *bvec;
1380 int i;
1381
1382
1383
1384
1385 bio_for_each_segment_all(bvec, bio, i) {
1386 if (bio_data_dir(bio) == READ)
1387 set_page_dirty_lock(bvec->bv_page);
1388
1389 put_page(bvec->bv_page);
1390 }
1391
1392 bio_put(bio);
1393}
1394
/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
1404void bio_unmap_user(struct bio *bio)
1405{
1406 __bio_unmap_user(bio);
1407 bio_put(bio);
1408}
1409
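/* End-io handler for bio_map_kern(): simply drop the bio. */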
1410static void bio_map_kern_endio(struct bio *bio)
1411{
1412 bio_put(bio);
1413}
1414
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
1425struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1426 gfp_t gfp_mask)
1427{
1428 unsigned long kaddr = (unsigned long)data;
1429 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1430 unsigned long start = kaddr >> PAGE_SHIFT;
1431 const int nr_pages = end - start;
1432 int offset, i;
1433 struct bio *bio;
1434
1435 bio = bio_kmalloc(gfp_mask, nr_pages);
1436 if (!bio)
1437 return ERR_PTR(-ENOMEM);
1438
1439 offset = offset_in_page(kaddr);
1440 for (i = 0; i < nr_pages; i++) {
1441 unsigned int bytes = PAGE_SIZE - offset;
1442
1443 if (len <= 0)
1444 break;
1445
1446 if (bytes > len)
1447 bytes = len;
1448
1449 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1450 offset) < bytes) {
1451
1452 bio_put(bio);
1453 return ERR_PTR(-EINVAL);
1454 }
1455
1456 data += bytes;
1457 len -= bytes;
1458 offset = 0;
1459 }
1460
1461 bio->bi_end_io = bio_map_kern_endio;
1462 return bio;
1463}
1464EXPORT_SYMBOL(bio_map_kern);
1465
1466static void bio_copy_kern_endio(struct bio *bio)
1467{
1468 bio_free_pages(bio);
1469 bio_put(bio);
1470}
1471
1472static void bio_copy_kern_endio_read(struct bio *bio)
1473{
1474 char *p = bio->bi_private;
1475 struct bio_vec *bvec;
1476 int i;
1477
1478 bio_for_each_segment_all(bvec, bio, i) {
1479 memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1480 p += bvec->bv_len;
1481 }
1482
1483 bio_copy_kern_endio(bio);
1484}
1485
/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
1497struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1498 gfp_t gfp_mask, int reading)
1499{
1500 unsigned long kaddr = (unsigned long)data;
1501 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1502 unsigned long start = kaddr >> PAGE_SHIFT;
1503 struct bio *bio;
1504 void *p = data;
1505 int nr_pages = 0;
1506
1507
1508
1509
1510 if (end < start)
1511 return ERR_PTR(-EINVAL);
1512
1513 nr_pages = end - start;
1514 bio = bio_kmalloc(gfp_mask, nr_pages);
1515 if (!bio)
1516 return ERR_PTR(-ENOMEM);
1517
1518 while (len) {
1519 struct page *page;
1520 unsigned int bytes = PAGE_SIZE;
1521
1522 if (bytes > len)
1523 bytes = len;
1524
1525 page = alloc_page(q->bounce_gfp | gfp_mask);
1526 if (!page)
1527 goto cleanup;
1528
1529 if (!reading)
1530 memcpy(page_address(page), p, bytes);
1531
1532 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1533 break;
1534
1535 len -= bytes;
1536 p += bytes;
1537 }
1538
1539 if (reading) {
1540 bio->bi_end_io = bio_copy_kern_endio_read;
1541 bio->bi_private = data;
1542 } else {
1543 bio->bi_end_io = bio_copy_kern_endio;
1544 }
1545
1546 return bio;
1547
1548cleanup:
1549 bio_free_pages(bio);
1550 bio_put(bio);
1551 return ERR_PTR(-ENOMEM);
1552}
1553
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is
 * mark the pages dirty _before_ performing IO, and in interrupt context
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into
 * hugetlb pages.  The logic in here doesn't really work right for compound
 * pages because the VM does not uniformly chase down the head page in all
 * cases, and dirtiness of compound pages is pretty meaningless anyway, so
 * we skip compound pages at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances
 * because direct-IO pins the pages with get_user_pages(), which keeps the
 * VM from cleaning them.  But other code (e.g. flusher threads) could clean
 * the pages if they are mapped pagecache.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
1583void bio_set_pages_dirty(struct bio *bio)
1584{
1585 struct bio_vec *bvec;
1586 int i;
1587
1588 bio_for_each_segment_all(bvec, bio, i) {
1589 if (!PageCompound(bvec->bv_page))
1590 set_page_dirty_lock(bvec->bv_page);
1591 }
1592}
1593EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1594
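/*
 * Drop the page references that were taken when user pages were mapped or
 * pinned into this bio.
 */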
1595static void bio_release_pages(struct bio *bio)
1596{
1597 struct bio_vec *bvec;
1598 int i;
1599
1600 bio_for_each_segment_all(bvec, bio, i)
1601 put_page(bvec->bv_page);
1602}
1603
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still
 * dirty.  If they are, then fine.  If, however, some pages are clean then
 * they must have been written out during the direct-IO read, so we defer to
 * a workqueue to re-dirty the pages and release them in process context.
 *
 * bio_check_pages_dirty() wholly owns the BIO from here on: it will run one
 * put_page() against each page and one bio_put() against the BIO.
 */
1615static void bio_dirty_fn(struct work_struct *work);
1616
1617static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1618static DEFINE_SPINLOCK(bio_dirty_lock);
1619static struct bio *bio_dirty_list;
1620
1621
1622
1623
1624static void bio_dirty_fn(struct work_struct *work)
1625{
1626 struct bio *bio, *next;
1627
1628 spin_lock_irq(&bio_dirty_lock);
1629 next = bio_dirty_list;
1630 bio_dirty_list = NULL;
1631 spin_unlock_irq(&bio_dirty_lock);
1632
1633 while ((bio = next) != NULL) {
1634 next = bio->bi_private;
1635
1636 bio_set_pages_dirty(bio);
1637 bio_release_pages(bio);
1638 bio_put(bio);
1639 }
1640}
1641
1642void bio_check_pages_dirty(struct bio *bio)
1643{
1644 struct bio_vec *bvec;
1645 unsigned long flags;
1646 int i;
1647
1648 bio_for_each_segment_all(bvec, bio, i) {
1649 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1650 goto defer;
1651 }
1652
1653 bio_release_pages(bio);
1654 bio_put(bio);
1655 return;
1656defer:
1657 spin_lock_irqsave(&bio_dirty_lock, flags);
1658 bio->bi_private = bio_dirty_list;
1659 bio_dirty_list = bio;
1660 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1661 schedule_work(&bio_dirty_work);
1662}
1663EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1664
1665void generic_start_io_acct(struct request_queue *q, int op,
1666 unsigned long sectors, struct hd_struct *part)
1667{
1668 const int sgrp = op_stat_group(op);
1669 int cpu = part_stat_lock();
1670
1671 part_round_stats(q, cpu, part);
1672 part_stat_inc(cpu, part, ios[sgrp]);
1673 part_stat_add(cpu, part, sectors[sgrp], sectors);
1674 part_inc_in_flight(q, part, op_is_write(op));
1675
1676 part_stat_unlock();
1677}
1678EXPORT_SYMBOL(generic_start_io_acct);
1679
1680void generic_end_io_acct(struct request_queue *q, int req_op,
1681 struct hd_struct *part, unsigned long start_time)
1682{
1683 unsigned long duration = jiffies - start_time;
1684 const int sgrp = op_stat_group(req_op);
1685 int cpu = part_stat_lock();
1686
1687 part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
1688 part_round_stats(q, cpu, part);
1689 part_dec_in_flight(q, part, op_is_write(req_op));
1690
1691 part_stat_unlock();
1692}
1693EXPORT_SYMBOL(generic_end_io_acct);
1694
1695#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1696void bio_flush_dcache_pages(struct bio *bi)
1697{
1698 struct bio_vec bvec;
1699 struct bvec_iter iter;
1700
1701 bio_for_each_segment(bvec, bi, iter)
1702 flush_dcache_page(bvec.bv_page);
1703}
1704EXPORT_SYMBOL(bio_flush_dcache_pages);
1705#endif
1706
1707static inline bool bio_remaining_done(struct bio *bio)
1708{
1709
1710
1711
1712
1713 if (!bio_flagged(bio, BIO_CHAIN))
1714 return true;
1715
1716 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1717
1718 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1719 bio_clear_flag(bio, BIO_CHAIN);
1720 return true;
1721 }
1722
1723 return false;
1724}
1725
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.
 **/
1740void bio_endio(struct bio *bio)
1741{
1742again:
1743 if (!bio_remaining_done(bio))
1744 return;
1745 if (!bio_integrity_endio(bio))
1746 return;
1747
1748 if (bio->bi_disk)
1749 rq_qos_done_bio(bio->bi_disk->queue, bio);
1750
	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
1759 if (bio->bi_end_io == bio_chain_endio) {
1760 bio = __bio_chain_endio(bio);
1761 goto again;
1762 }
1763
1764 if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1765 trace_block_bio_complete(bio->bi_disk->queue, bio,
1766 blk_status_to_errno(bio->bi_status));
1767 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1768 }
1769
1770 blk_throtl_bio_endio(bio);
1771
1772 bio_uninit(bio);
1773 if (bio->bi_end_io)
1774 bio->bi_end_io(bio);
1775}
1776EXPORT_SYMBOL(bio_endio);
1777
/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start
 * of @bio, and updates @bio to represent the remaining sectors.
 *
 * The newly allocated bio will point to @bio's bi_io_vec; it is the
 * caller's responsibility to ensure that @bio is not freed before the split.
 */
1792struct bio *bio_split(struct bio *bio, int sectors,
1793 gfp_t gfp, struct bio_set *bs)
1794{
1795 struct bio *split;
1796
1797 BUG_ON(sectors <= 0);
1798 BUG_ON(sectors >= bio_sectors(bio));
1799
1800 split = bio_clone_fast(bio, gfp, bs);
1801 if (!split)
1802 return NULL;
1803
1804 split->bi_iter.bi_size = sectors << 9;
1805
1806 if (bio_integrity(split))
1807 bio_integrity_trim(split);
1808
1809 bio_advance(bio, split->bi_iter.bi_size);
1810 bio->bi_iter.bi_done = 0;
1811
1812 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1813 bio_set_flag(split, BIO_TRACE_COMPLETION);
1814
1815 return split;
1816}
1817EXPORT_SYMBOL(bio_split);
1818
1819
1820
1821
1822
1823
1824
1825void bio_trim(struct bio *bio, int offset, int size)
1826{
1827
1828
1829
1830
1831 size <<= 9;
1832 if (offset == 0 && size == bio->bi_iter.bi_size)
1833 return;
1834
1835 bio_clear_flag(bio, BIO_SEG_VALID);
1836
1837 bio_advance(bio, offset << 9);
1838
1839 bio->bi_iter.bi_size = size;
1840
1841 if (bio_integrity(bio))
1842 bio_integrity_trim(bio);
1843
1844}
1845EXPORT_SYMBOL_GPL(bio_trim);
1846
1847
1848
1849
1850
1851int biovec_init_pool(mempool_t *pool, int pool_entries)
1852{
1853 struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1854
1855 return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1856}
1857
1858
1859
1860
1861
1862
1863
1864void bioset_exit(struct bio_set *bs)
1865{
1866 if (bs->rescue_workqueue)
1867 destroy_workqueue(bs->rescue_workqueue);
1868 bs->rescue_workqueue = NULL;
1869
1870 mempool_exit(&bs->bio_pool);
1871 mempool_exit(&bs->bvec_pool);
1872
1873 bioset_integrity_free(bs);
1874 if (bs->bio_slab)
1875 bio_put_slab(bs);
1876 bs->bio_slab = NULL;
1877}
1878EXPORT_SYMBOL(bioset_exit);
1879
/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *              and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be
 *    allocated for iovecs; this pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be
 *    used to dispatch queued bios when the mempool runs out of space.
 */
1901int bioset_init(struct bio_set *bs,
1902 unsigned int pool_size,
1903 unsigned int front_pad,
1904 int flags)
1905{
1906 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1907
1908 bs->front_pad = front_pad;
1909
1910 spin_lock_init(&bs->rescue_lock);
1911 bio_list_init(&bs->rescue_list);
1912 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1913
1914 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1915 if (!bs->bio_slab)
1916 return -ENOMEM;
1917
1918 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1919 goto bad;
1920
1921 if ((flags & BIOSET_NEED_BVECS) &&
1922 biovec_init_pool(&bs->bvec_pool, pool_size))
1923 goto bad;
1924
1925 if (!(flags & BIOSET_NEED_RESCUER))
1926 return 0;
1927
1928 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1929 if (!bs->rescue_workqueue)
1930 goto bad;
1931
1932 return 0;
1933bad:
1934 bioset_exit(bs);
1935 return -ENOMEM;
1936}
1937EXPORT_SYMBOL(bioset_init);
1938
1939
1940
1941
1942
1943int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1944{
1945 int flags;
1946
1947 flags = 0;
1948 if (src->bvec_pool.min_nr)
1949 flags |= BIOSET_NEED_BVECS;
1950 if (src->rescue_workqueue)
1951 flags |= BIOSET_NEED_RESCUER;
1952
1953 return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1954}
1955EXPORT_SYMBOL(bioset_init_from_src);
1956
1957#ifdef CONFIG_BLK_CGROUP
1958
1959#ifdef CONFIG_MEMCG
1960
1961
1962
1963
1964
1965
1966
1967
1968int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
1969{
1970 struct cgroup_subsys_state *blkcg_css;
1971
1972 if (unlikely(bio->bi_css))
1973 return -EBUSY;
1974 if (!page->mem_cgroup)
1975 return 0;
1976 blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
1977 &io_cgrp_subsys);
1978 bio->bi_css = blkcg_css;
1979 return 0;
1980}
1981#endif
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1996{
1997 if (unlikely(bio->bi_css))
1998 return -EBUSY;
1999 css_get(blkcg_css);
2000 bio->bi_css = blkcg_css;
2001 return 0;
2002}
2003EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2015{
2016 if (unlikely(bio->bi_blkg))
2017 return -EBUSY;
2018 if (!blkg_try_get(blkg))
2019 return -ENODEV;
2020 bio->bi_blkg = blkg;
2021 return 0;
2022}
2023
2024
2025
2026
2027
2028void bio_disassociate_task(struct bio *bio)
2029{
2030 if (bio->bi_ioc) {
2031 put_io_context(bio->bi_ioc);
2032 bio->bi_ioc = NULL;
2033 }
2034 if (bio->bi_css) {
2035 css_put(bio->bi_css);
2036 bio->bi_css = NULL;
2037 }
2038 if (bio->bi_blkg) {
2039 blkg_put(bio->bi_blkg);
2040 bio->bi_blkg = NULL;
2041 }
2042}
2043
2044
2045
2046
2047
2048
2049void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
2050{
2051 if (src->bi_css)
2052 WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2053}
2054EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
2055#endif
2056
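/*
 * Create a kmem cache for every biovec pool size that cannot be served by
 * the vectors embedded inline in struct bio.
 */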
2057static void __init biovec_init_slabs(void)
2058{
2059 int i;
2060
2061 for (i = 0; i < BVEC_POOL_NR; i++) {
2062 int size;
2063 struct biovec_slab *bvs = bvec_slabs + i;
2064
2065 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2066 bvs->slab = NULL;
2067 continue;
2068 }
2069
2070 size = bvs->nr_vecs * sizeof(struct bio_vec);
2071 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2072 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2073 }
2074}
2075
2076static int __init init_bio(void)
2077{
2078 bio_slab_max = 2;
2079 bio_slab_nr = 0;
2080 bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
2081 GFP_KERNEL);
2082 if (!bio_slabs)
2083 panic("bio: can't allocate bios\n");
2084
2085 bio_integrity_init();
2086 biovec_init_slabs();
2087
2088 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2089 panic("bio: can't allocate bios\n");
2090
2091 if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2092 panic("bio: can't create integrity pool\n");
2093
2094 return 0;
2095}
2096subsys_initcall(init_bio);
2097