/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct integrity_slab {
	struct kmem_cache *slab;
	unsigned short nr_vecs;
	char name[8];
};

#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
	IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
};
#undef IS

static struct workqueue_struct *kintegrityd_wq;

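/*
 * Map a requested vector count to the index of the smallest bip_slab
 * bucket that can hold it (buckets of 1, 4, 16, 64, 128 and
 * BIO_MAX_PAGES vectors, mirroring the bvec pools).
 */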
static inline unsigned int vecs_to_idx(unsigned int nr)
{
	switch (nr) {
	case 1:
		return 0;
	case 2 ... 4:
		return 1;
	case 5 ... 16:
		return 2;
	case 17 ... 64:
		return 3;
	case 65 ... 128:
		return 4;
	case 129 ... BIO_MAX_PAGES:
		return 5;
	default:
		BUG();
	}
}

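/*
 * Only top-order (BIOVEC_MAX_IDX) payloads are backed by the bio_set's
 * mempool; smaller ones come straight from their kmem_cache.
 */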
static inline int use_bip_pool(unsigned int idx)
{
	return idx == BIOVEC_MAX_IDX;
}

/**
 * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 * @bs:		bio_set to allocate from
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
							 gfp_t gfp_mask,
							 unsigned int nr_vecs,
							 struct bio_set *bs)
{
	struct bio_integrity_payload *bip;
	unsigned int idx = vecs_to_idx(nr_vecs);

	BUG_ON(bio == NULL);
	bip = NULL;

	/* Lower order allocations come straight from slab */
	if (!use_bip_pool(idx))
		bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);

	/* Use mempool if lower order alloc failed or max vecs were requested */
	if (bip == NULL) {
		idx = BIOVEC_MAX_IDX;  /* mempool entries are top-order */
		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);

		if (unlikely(bip == NULL)) {
			printk(KERN_ERR "%s: could not alloc bip\n", __func__);
			return NULL;
		}
	}

	memset(bip, 0, sizeof(*bip));

	bip->bip_slab = idx;
	bip->bip_bio = bio;
	bio->bi_integrity = bip;

	return bip;
}
EXPORT_SYMBOL(bio_integrity_alloc_bioset);

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
}
EXPORT_SYMBOL(bio_integrity_alloc);

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 * @bs:		bio_set this bio was allocated from
 *
 * Description: Used to free the integrity portion of a bio. Usually
 * called from bio_free().
 */
void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip == NULL);

	/* A cloned bio doesn't own the integrity metadata */
	if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
	    && bip->bip_buf != NULL)
		kfree(bip->bip_buf);

	if (use_bip_pool(bip->bip_slab))
		mempool_free(bip, bs->bio_integrity_pool);
	else
		kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);

	bio->bi_integrity = NULL;
}
EXPORT_SYMBOL(bio_integrity_free);

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attaches integrity metadata to a bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct bio_vec *iv;

	if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
		printk(KERN_ERR "%s: bip_vec full\n", __func__);
		return 0;
	}

	iv = bip_vec_idx(bip, bip->bip_vcnt);
	BUG_ON(iv == NULL);

	iv->bv_page = page;
	iv->bv_len = len;
	iv->bv_offset = offset;
	bip->bip_vcnt++;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

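/*
 * Check whether the target device has an integrity profile that is
 * enabled for the given data direction.  The flags correspond to the
 * read_verify and write_generate knobs exposed in sysfs.
 */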
static int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi == NULL)
		return 0;

	if (rw == READ && bi->verify_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_READ))
		return 1;

	if (rw == WRITE && bi->generate_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_WRITE))
		return 1;

	return 0;
}

/**
 * bio_integrity_enabled - Check whether integrity can be passed
 * @bio:	bio to check
 *
 * Description: Determines whether bio_integrity_prep() can be called
 * on this bio or not.  bio data direction and target device must be
 * set prior to calling.  The function honors the write_generate and
 * read_verify flags in sysfs.
 */
int bio_integrity_enabled(struct bio *bio)
{
	/* Already protected? */
	if (bio_integrity(bio))
		return 0;

	return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
}
EXPORT_SYMBOL(bio_integrity_enabled);

/**
 * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto
 * @bi:		blk_integrity profile for device
 * @sectors:	Number of 512 sectors to convert
 *
 * Description: The block layer calculates everything in 512 byte
 * sectors but integrity metadata is done in terms of the hardware
 * sector size of the storage device.  Convert the block layer sectors
 * to physical sectors.
 */
static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
						    unsigned int sectors)
{
	/* At this point there are only 512b or 4096b DIF/EPP devices */
	if (bi->sector_size == 4096)
		return sectors >> 3;

	return sectors;
}

/**
 * bio_integrity_tag_size - Retrieve integrity tag space
 * @bio:	bio to inspect
 *
 * Description: Returns the maximum number of tag bytes that can be
 * attached to this bio.  Filesystems can use this to determine how
 * much metadata to attach to an I/O.
 */
unsigned int bio_integrity_tag_size(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	BUG_ON(bio->bi_size == 0);

	return bi->tag_size * (bio->bi_size / bi->sector_size);
}
EXPORT_SYMBOL(bio_integrity_tag_size);

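/*
 * Common helper for bio_integrity_set_tag() and bio_integrity_get_tag():
 * converts the tag buffer length to a number of hardware sectors and
 * copies tag data to or from the protection buffer.
 */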
int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip->bip_buf == NULL);

	if (bi->tag_size == 0)
		return -1;

	nr_sectors = bio_integrity_hw_sectors(bi,
					DIV_ROUND_UP(len, bi->tag_size));

	if (nr_sectors * bi->tuple_size > bip->bip_size) {
		printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
		       __func__, nr_sectors * bi->tuple_size, bip->bip_size);
		return -1;
	}

	if (set)
		bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
	else
		bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);

	return 0;
}

/**
 * bio_integrity_set_tag - Attach a tag buffer to a bio
 * @bio:	bio to attach buffer to
 * @tag_buf:	Pointer to a buffer containing tag data
 * @len:	Length of the included buffer
 *
 * Description: Use this function to tag a bio by leveraging the extra
 * space provided by devices formatted with integrity protection.  The
 * size of the integrity buffer must be <= to the size reported by
 * bio_integrity_tag_size().
 */
int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != WRITE);

	return bio_integrity_tag(bio, tag_buf, len, 1);
}
EXPORT_SYMBOL(bio_integrity_set_tag);

/**
 * bio_integrity_get_tag - Retrieve a tag buffer from a bio
 * @bio:	bio to retrieve buffer from
 * @tag_buf:	Pointer to a buffer for the tag data
 * @len:	Length of the target buffer
 *
 * Description: Use this function to retrieve the tag buffer from a
 * formatted bio.  The size of the integrity buffer must be <= to the
 * size reported by bio_integrity_tag_size().
 */
int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != READ);

	return bio_integrity_tag(bio, tag_buf, len, 0);
}
EXPORT_SYMBOL(bio_integrity_get_tag);

/**
 * bio_integrity_generate - Generate integrity metadata for a bio
 * @bio:	bio to generate integrity metadata for
 *
 * Description: Generates integrity metadata for a bio by calling the
 * block device's generation callback function.  The bio must have a
 * bip attached with enough room to accommodate the generated
 * integrity metadata.
 */
static void bio_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	struct blk_integrity_exchg bix;
	struct bio_vec *bv;
	sector_t sector = bio->bi_sector;
	unsigned int i, sectors, total;
	void *prot_buf = bio->bi_integrity->bip_buf;

	total = 0;
	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
		bix.sector = sector;

		bi->generate_fn(&bix);

		sectors = bv->bv_len / bi->sector_size;
		sector += sectors;
		prot_buf += sectors * bi->tuple_size;
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr);
	}
}

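/* Size of one protection tuple, or zero if no profile is registered */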
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
	if (bi)
		return bi->tuple_size;

	return 0;
}

/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description: Allocates a buffer for integrity metadata, maps the
 * pages and attaches them to a bio.  The bio must have data
 * direction, target device and start sector set prior to calling.  In
 * the WRITE case, integrity metadata will be generated using the
 * block device's integrity function.  In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
int bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi;
	struct request_queue *q;
	void *buf;
	unsigned long start, end;
	unsigned int len, nr_pages;
	unsigned int bytes, offset, i;
	unsigned int sectors;

	bi = bdev_get_integrity(bio->bi_bdev);
	q = bdev_get_queue(bio->bi_bdev);
	BUG_ON(bi == NULL);
	BUG_ON(bio_integrity(bio));

	sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));

	/* Allocate kernel buffer for protection data */
	len = sectors * blk_integrity_tuple_size(bi);
	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
	if (unlikely(buf == NULL)) {
		printk(KERN_ERR "could not allocate integrity buffer\n");
		return -ENOMEM;
	}

	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ((unsigned long) buf) >> PAGE_SHIFT;
	nr_pages = end - start;

	/* Allocate bio integrity payload and integrity vectors */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
	if (unlikely(bip == NULL)) {
		printk(KERN_ERR "could not allocate data integrity bioset\n");
		kfree(buf);
		return -EIO;
	}

	bip->bip_buf = buf;
	bip->bip_size = len;
	bip->bip_sector = bio->bi_sector;

	/* Map it */
	offset = offset_in_page(buf);
	for (i = 0 ; i < nr_pages ; i++) {
		int ret;
		bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		ret = bio_integrity_add_page(bio, virt_to_page(buf),
					     bytes, offset);

		if (ret == 0)
			return 0;

		if (ret < bytes)
			break;

		buf += bytes;
		len -= bytes;
		offset = 0;
	}

	/* Install custom I/O completion handler if read verify is enabled */
	if (bio_data_dir(bio) == READ) {
		bip->bip_end_io = bio->bi_end_io;
		bio->bi_end_io = bio_integrity_endio;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE)
		bio_integrity_generate(bio);

	return 0;
}
EXPORT_SYMBOL(bio_integrity_prep);
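/*
 * A minimal sketch of a typical call site in the submission path
 * (assuming the usual pairing with bio_integrity_enabled()):
 *
 *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio) < 0) {
 *		bio_endio(bio, -EIO);
 *		return;
 *	}
 */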

/**
 * bio_integrity_verify - Verify integrity metadata for a bio
 * @bio:	bio to verify
 *
 * Description: This function is called to verify the integrity of a
 * bio.  The data in the bio io_vec is compared to the integrity
 * metadata returned by the HBA.
 */
static int bio_integrity_verify(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	struct blk_integrity_exchg bix;
	struct bio_vec *bv;
	sector_t sector = bio->bi_integrity->bip_sector;
	unsigned int i, sectors, total, ret;
	void *prot_buf = bio->bi_integrity->bip_buf;

	ret = total = 0;
	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
		bix.sector = sector;

		ret = bi->verify_fn(&bix);

		if (ret) {
			kunmap_atomic(kaddr);
			return ret;
		}

		sectors = bv->bv_len / bi->sector_size;
		sector += sectors;
		prot_buf += sectors * bi->tuple_size;
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr);
	}

	return ret;
}

/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	int error;

	error = bio_integrity_verify(bio);

	/* Restore original bio completion handler */
	bio->bi_end_io = bip->bip_end_io;
	bio_endio(bio, error);
}

/**
 * bio_integrity_endio - Integrity I/O completion function
 * @bio:	Protected bio
 * @error:	Error status of the I/O
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context.  However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context.  This function postpones completion
 * accordingly.
 */
void bio_integrity_endio(struct bio *bio, int error)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip->bip_bio != bio);

	/* In case of an I/O error there is no point in verifying the
	 * integrity metadata.  Restore original bio end_io handler
	 * and run it.
	 */
	if (error) {
		bio->bi_end_io = bip->bip_end_io;
		bio_endio(bio, error);

		return;
	}

	INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
	queue_work(kintegrityd_wq, &bip->bip_work);
}
EXPORT_SYMBOL(bio_integrity_endio);

/**
 * bio_integrity_mark_head - Advance bip_vec skip bytes
 * @bip:	Integrity vector to advance
 * @skip:	Number of bytes to advance it
 */
void bio_integrity_mark_head(struct bio_integrity_payload *bip,
			     unsigned int skip)
{
	struct bio_vec *iv;
	unsigned int i;

	bip_for_each_vec(iv, bip, i) {
		if (skip == 0) {
			bip->bip_idx = i;
			return;
		} else if (skip >= iv->bv_len) {
			skip -= iv->bv_len;
		} else {
			iv->bv_offset += skip;
			iv->bv_len -= skip;
			bip->bip_idx = i;
			return;
		}
	}
}

/**
 * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
 * @bip:	Integrity vector to truncate
 * @len:	New length of integrity vector
 */
void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
			     unsigned int len)
{
	struct bio_vec *iv;
	unsigned int i;

	bip_for_each_vec(iv, bip, i) {
		if (len == 0) {
			bip->bip_vcnt = i;
			return;
		} else if (len >= iv->bv_len) {
			len -= iv->bv_len;
		} else {
			iv->bv_len = len;
			len = 0;
		}
	}
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip == NULL);
	BUG_ON(bi == NULL);

	nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
	bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_advance);

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio:	bio whose integrity vector to update
 * @offset:	offset to first data sector
 * @sectors:	number of data sectors
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 * The ivec will be advanced corresponding to 'offset' data sectors
 * and the length will be truncated corresponding to 'sectors' data
 * sectors.
 */
void bio_integrity_trim(struct bio *bio, unsigned int offset,
			unsigned int sectors)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip == NULL);
	BUG_ON(bi == NULL);
	BUG_ON(!bio_flagged(bio, BIO_CLONED));

	nr_sectors = bio_integrity_hw_sectors(bi, sectors);
	bip->bip_sector += offset;
	bio_integrity_mark_head(bip, offset * bi->tuple_size);
	bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_split - Split integrity metadata
 * @bio:	Protected bio
 * @bp:		Resulting bio_pair
 * @sectors:	Sector offset at which to split
 *
 * Description: Splits an integrity page into a bio_pair.
 */
void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip = bio->bi_integrity;
	unsigned int nr_sectors;

	if (bio_integrity(bio) == 0)
		return;

	bi = bdev_get_integrity(bio->bi_bdev);
	BUG_ON(bi == NULL);
	BUG_ON(bip->bip_vcnt != 1);

	nr_sectors = bio_integrity_hw_sectors(bi, sectors);

	bp->bio1.bi_integrity = &bp->bip1;
	bp->bio2.bi_integrity = &bp->bip2;

	bp->iv1 = bip->bip_vec[0];
	bp->iv2 = bip->bip_vec[0];

	/* Adjust the split vectors first, then install them in the bips */
	bp->iv1.bv_len = sectors * bi->tuple_size;
	bp->iv2.bv_offset += sectors * bi->tuple_size;
	bp->iv2.bv_len -= sectors * bi->tuple_size;

	bp->bip1.bip_vec[0] = bp->iv1;
	bp->bip2.bip_vec[0] = bp->iv2;

	bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
	bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;

	bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
	bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
}
EXPORT_SYMBOL(bio_integrity_split);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio:	New bio
 * @bio_src:	Original bio
 * @gfp_mask:	Memory allocation mask
 * @bs:		bio_set to allocate bip from
 *
 * Description: Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);

	if (bip == NULL)
		return -EIO;

	memcpy(bip->bip_vec, bip_src->bip_vec,
	       bip_src->bip_vcnt * sizeof(struct bio_vec));

	bip->bip_sector = bip_src->bip_sector;
	bip->bip_vcnt = bip_src->bip_vcnt;
	bip->bip_idx = bip_src->bip_idx;

	return 0;
}
EXPORT_SYMBOL(bio_integrity_clone);

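/*
 * Create the mempool that backs top-order integrity payload allocations
 * for this bio_set.  Safe to call more than once; an existing pool is
 * left untouched.
 */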
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);

	if (bs->bio_integrity_pool)
		return 0;

	bs->bio_integrity_pool =
		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);

	if (!bs->bio_integrity_pool)
		return -1;

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);

void bioset_integrity_free(struct bio_set *bs)
{
	if (bs->bio_integrity_pool)
		mempool_destroy(bs->bio_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);

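/*
 * Boot-time setup: create the kintegrityd workqueue and one kmem_cache
 * per payload bucket.  Assumed to run once from the bio layer's own
 * init path.
 */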
void __init bio_integrity_init(void)
{
	unsigned int i;

	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
		unsigned int size;

		size = sizeof(struct bio_integrity_payload)
			+ bip_slab[i].nr_vecs * sizeof(struct bio_vec);

		bip_slab[i].slab =
			kmem_cache_create(bip_slab[i].name, size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}