/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 * 1307 USA
 *
 */

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

struct integrity_slab {
	struct kmem_cache *slab;
	unsigned short nr_vecs;
	char name[8];
};

#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
	IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
};
#undef IS

static struct workqueue_struct *kintegrityd_wq;

static inline unsigned int vecs_to_idx(unsigned int nr)
{
	switch (nr) {
	case 1:
		return 0;
	case 2 ... 4:
		return 1;
	case 5 ... 16:
		return 2;
	case 17 ... 64:
		return 3;
	case 65 ... 128:
		return 4;
	case 129 ... BIO_MAX_PAGES:
		return 5;
	default:
		BUG();
	}
}

static inline int use_bip_pool(unsigned int idx)
{
	if (idx == BIOVEC_NR_POOLS)
		return 1;

	return 0;
}

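/**
 * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 * @bs:		bio_set to allocate from
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  The resulting bio will not have its bi_rw flags set.
 */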
struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
							 gfp_t gfp_mask,
							 unsigned int nr_vecs,
							 struct bio_set *bs)
{
	struct bio_integrity_payload *bip;
	unsigned int idx = vecs_to_idx(nr_vecs);

	BUG_ON(bio == NULL);
	bip = NULL;

	/* Lower order allocations come straight from slab */
	if (!use_bip_pool(idx))
		bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);

	/* Use mempool if lower order alloc failed or max vecs were requested */
	if (bip == NULL) {
		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);

		if (unlikely(bip == NULL)) {
			printk(KERN_ERR "%s: could not alloc bip\n", __func__);
			return NULL;
		}
	}

	memset(bip, 0, sizeof(*bip));

	bip->bip_slab = idx;
	bip->bip_bio = bio;
	bio->bi_integrity = bip;

	return bip;
}
EXPORT_SYMBOL(bio_integrity_alloc_bioset);

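/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  The payload is allocated from the standard fs_bio_set.
 */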
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
}
EXPORT_SYMBOL(bio_integrity_alloc);

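/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 * @bs:		bio_set this bio was allocated from
 *
 * Description: Used to free the integrity portion of a bio.  Usually
 * called from bio_free().
 */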
void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip == NULL);

	/* A cloned bio doesn't own the integrity metadata */
	if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
	    && bip->bip_buf != NULL)
		kfree(bip->bip_buf);

	if (use_bip_pool(bip->bip_slab))
		mempool_free(bip, bs->bio_integrity_pool);
	else
		kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);

	bio->bi_integrity = NULL;
}
EXPORT_SYMBOL(bio_integrity_free);

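/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attaches integrity metadata to a bio.  Returns the
 * number of bytes attached, or 0 if the integrity vector is full.
 */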
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct bio_vec *iv;

	if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
		printk(KERN_ERR "%s: bip_vec full\n", __func__);
		return 0;
	}

	iv = bip_vec_idx(bip, bip->bip_vcnt);
	BUG_ON(iv == NULL);

	iv->bv_page = page;
	iv->bv_len = len;
	iv->bv_offset = offset;
	bip->bip_vcnt++;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

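/**
 * bdev_integrity_enabled - Check whether integrity is enabled for a bdev
 * @bdev:	block device to check
 * @rw:		data direction (READ or WRITE)
 *
 * Description: Determines whether the block device has an integrity
 * profile that can generate or verify metadata for this data
 * direction, honoring the INTEGRITY_FLAG_READ/WRITE profile flags.
 */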
static int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi == NULL)
		return 0;

	if (rw == READ && bi->verify_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_READ))
		return 1;

	if (rw == WRITE && bi->generate_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_WRITE))
		return 1;

	return 0;
}

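/**
 * bio_integrity_enabled - Check whether integrity can be passed
 * @bio:	bio to check
 *
 * Description: Determines whether bio_integrity_prep() can be called
 * on this bio or not.  bio data direction and target device must be
 * set prior to calling.  The function honors the write_generate and
 * read_verify flags in sysfs.
 */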
int bio_integrity_enabled(struct bio *bio)
{
	/* Already protected? */
	if (bio_integrity(bio))
		return 0;

	return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
}
EXPORT_SYMBOL(bio_integrity_enabled);

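/**
 * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto
 * @bi:		blk_integrity profile for device
 * @sectors:	Number of 512 byte sectors to convert
 *
 * Description: The block layer calculates everything in 512 byte
 * sectors but integrity metadata is done in terms of the hardware
 * sector size of the storage device.  Convert the block layer
 * sectors to physical sectors.
 */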
static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
						    unsigned int sectors)
{
	/* At this point there are only 512b or 4096b DIF/EPP devices */
	if (bi->sector_size == 4096)
		return sectors >> 3;

	return sectors;
}

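/**
 * bio_integrity_tag_size - Retrieve integrity tag space
 * @bio:	bio to inspect
 *
 * Description: Returns the maximum number of tag bytes that can be
 * attached to this bio.  Filesystems can use this to determine how
 * much metadata to attach to an I/O.
 */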
unsigned int bio_integrity_tag_size(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	BUG_ON(bio->bi_size == 0);

	return bi->tag_size * (bio->bi_size / bi->sector_size);
}
EXPORT_SYMBOL(bio_integrity_tag_size);

int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip->bip_buf == NULL);

	if (bi->tag_size == 0)
		return -1;

	nr_sectors = bio_integrity_hw_sectors(bi,
					DIV_ROUND_UP(len, bi->tag_size));

	if (nr_sectors * bi->tuple_size > bip->bip_size) {
		printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
		       __func__, nr_sectors * bi->tuple_size, bip->bip_size);
		return -1;
	}

	if (set)
		bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
	else
		bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);

	return 0;
}

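/**
 * bio_integrity_set_tag - Attach a tag buffer to a bio
 * @bio:	bio to tag
 * @tag_buf:	Pointer to a buffer containing tag data
 * @len:	Length of the included buffer
 *
 * Description: Use this function to tag a bio by passing a tag
 * buffer that can later be retrieved using bio_integrity_get_tag().
 */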
int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != WRITE);

	return bio_integrity_tag(bio, tag_buf, len, 1);
}
EXPORT_SYMBOL(bio_integrity_set_tag);

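/**
 * bio_integrity_get_tag - Retrieve a tag buffer from a bio
 * @bio:	bio to retrieve tag from
 * @tag_buf:	Pointer to a buffer for the tag data
 * @len:	Length of the target buffer
 *
 * Description: Use this function to retrieve the tag buffer from a
 * stapled bio.  The size of the buffer must match the length used
 * when wrapping.
 */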
int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != READ);

	return bio_integrity_tag(bio, tag_buf, len, 0);
}
EXPORT_SYMBOL(bio_integrity_get_tag);

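/**
 * bio_integrity_generate - Generate integrity metadata for a bio
 * @bio:	bio to generate integrity metadata for
 *
 * Description: Generates integrity metadata for a bio by calling the
 * block device's generation callback function.  The bio must have a
 * bip attached with enough room to accommodate the generated
 * integrity metadata.
 */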
static void bio_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	struct blk_integrity_exchg bix;
	struct bio_vec *bv;
	sector_t sector = bio->bi_sector;
	unsigned int i, sectors, total;
	void *prot_buf = bio->bi_integrity->bip_buf;

	total = 0;
	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
		bix.sector = sector;

		bi->generate_fn(&bix);

		sectors = bv->bv_len / bi->sector_size;
		sector += sectors;
		prot_buf += sectors * bi->tuple_size;
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr, KM_USER0);
	}
}

static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
	if (bi)
		return bi->tuple_size;

	return 0;
}

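/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description: Allocates a buffer for integrity metadata, maps the
 * pages and attaches them to a bio.  The bio must have data
 * direction, target device and start sector set prior to calling.
 * In addition, the length of the data must be equal to an integer
 * number of logical sectors.
 */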
int bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi;
	struct request_queue *q;
	void *buf;
	unsigned long start, end;
	unsigned int len, nr_pages;
	unsigned int bytes, offset, i;
	unsigned int sectors;

	bi = bdev_get_integrity(bio->bi_bdev);
	q = bdev_get_queue(bio->bi_bdev);
	BUG_ON(bi == NULL);
	BUG_ON(bio_integrity(bio));

	sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));

	/* Allocate kernel buffer for protection data */
	len = sectors * blk_integrity_tuple_size(bi);
	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
	if (unlikely(buf == NULL)) {
		printk(KERN_ERR "could not allocate integrity buffer\n");
		return -EIO;
	}

	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ((unsigned long) buf) >> PAGE_SHIFT;
	nr_pages = end - start;

	/* Allocate bio integrity payload and integrity vectors */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
	if (unlikely(bip == NULL)) {
		printk(KERN_ERR "could not allocate data integrity bioset\n");
		kfree(buf);
		return -EIO;
	}

	bip->bip_buf = buf;
	bip->bip_size = len;
	bip->bip_sector = bio->bi_sector;

	/* Map it */
	offset = offset_in_page(buf);
	for (i = 0 ; i < nr_pages ; i++) {
		int ret;
		bytes = PAGE_SIZE - offset;

		if (len == 0)
			break;

		if (bytes > len)
			bytes = len;

		ret = bio_integrity_add_page(bio, virt_to_page(buf),
					     bytes, offset);

		if (ret == 0)
			return 0;

		if (ret < bytes)
			break;

		buf += bytes;
		len -= bytes;
		offset = 0;
	}

	/* Install custom I/O completion handler if read verify is enabled */
	if (bio_data_dir(bio) == READ) {
		bip->bip_end_io = bio->bi_end_io;
		bio->bi_end_io = bio_integrity_endio;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE)
		bio_integrity_generate(bio);

	return 0;
}
EXPORT_SYMBOL(bio_integrity_prep);

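/**
 * bio_integrity_verify - Verify integrity metadata for a bio
 * @bio:	bio to verify
 *
 * Description: This function is called to verify the integrity of a
 * bio.  The data in the bio io_vec is compared to the integrity
 * metadata returned by the HBA.
 */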
static int bio_integrity_verify(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	struct blk_integrity_exchg bix;
	struct bio_vec *bv;
	sector_t sector = bio->bi_integrity->bip_sector;
	unsigned int i, sectors, total;
	int ret;
	void *prot_buf = bio->bi_integrity->bip_buf;

	ret = total = 0;
	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
	bix.sector_size = bi->sector_size;

	bio_for_each_segment(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
		bix.sector = sector;

		ret = bi->verify_fn(&bix);

		if (ret) {
			kunmap_atomic(kaddr, KM_USER0);
			return ret;
		}

		sectors = bv->bv_len / bi->sector_size;
		sector += sectors;
		prot_buf += sectors * bi->tuple_size;
		total += sectors * bi->tuple_size;
		BUG_ON(total > bio->bi_integrity->bip_size);

		kunmap_atomic(kaddr, KM_USER0);
	}

	return ret;
}

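/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */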
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	int error;

	error = bio_integrity_verify(bio);

	/* Restore original bio completion handler */
	bio->bi_end_io = bip->bip_end_io;
	bio_endio(bio, error);
}

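/**
 * bio_integrity_endio - Integrity I/O completion function
 * @bio:	Protected bio
 * @error:	Completion status
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context.  However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context.  This function postpones completion
 * accordingly.
 */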
void bio_integrity_endio(struct bio *bio, int error)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip->bip_bio != bio);

	/* In case of an I/O error there is no point in verifying the
	 * integrity metadata.  Restore original bio end_io handler
	 * and run it.
	 */
	if (error) {
		bio->bi_end_io = bip->bip_end_io;
		bio_endio(bio, error);

		return;
	}

	INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
	queue_work(kintegrityd_wq, &bip->bip_work);
}
EXPORT_SYMBOL(bio_integrity_endio);

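/**
 * bio_integrity_mark_head - Advance bip_vec skip bytes
 * @bip:	Integrity vector to advance
 * @skip:	Number of bytes to advance it
 */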
void bio_integrity_mark_head(struct bio_integrity_payload *bip,
			     unsigned int skip)
{
	struct bio_vec *iv;
	unsigned int i;

	bip_for_each_vec(iv, bip, i) {
		if (skip == 0) {
			bip->bip_idx = i;
			return;
		} else if (skip >= iv->bv_len) {
			skip -= iv->bv_len;
		} else {
			iv->bv_offset += skip;
			iv->bv_len -= skip;
			bip->bip_idx = i;
			return;
		}
	}
}

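/**
 * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
 * @bip:	Integrity vector to truncate
 * @len:	New length of integrity vector
 */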
void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
			     unsigned int len)
{
	struct bio_vec *iv;
	unsigned int i;

	bip_for_each_vec(iv, bip, i) {
		if (len == 0) {
			bip->bip_vcnt = i;
			return;
		} else if (len >= iv->bv_len) {
			len -= iv->bv_len;
		} else {
			iv->bv_len = len;
			len = 0;
		}
	}
}

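/**
 * bio_integrity_advance - Advance the integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */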
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip == NULL);
	BUG_ON(bi == NULL);

	nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
	bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_advance);

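/**
 * bio_integrity_trim - Trim integrity vector
 * @bio:	bio whose integrity vector to update
 * @offset:	offset to first data sector
 * @sectors:	number of data sectors
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 * The ivec will be advanced corresponding to 'offset' data sectors
 * and the length will be truncated corresponding to 'sectors' data
 * sectors.
 */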
void bio_integrity_trim(struct bio *bio, unsigned int offset,
			unsigned int sectors)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip == NULL);
	BUG_ON(bi == NULL);
	BUG_ON(!bio_flagged(bio, BIO_CLONED));

	nr_sectors = bio_integrity_hw_sectors(bi, sectors);
	bip->bip_sector = bip->bip_sector + offset;
	bio_integrity_mark_head(bip, offset * bi->tuple_size);
	bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_trim);

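/**
 * bio_integrity_split - Split integrity metadata
 * @bio:	Protected bio
 * @bp:		Resulting bio_pair
 * @sectors:	Offset
 *
 * Description: Splits an integrity page into a bio_pair.
 */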
void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
{
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip = bio->bi_integrity;
	unsigned int nr_sectors;

	if (bio_integrity(bio) == 0)
		return;

	bi = bdev_get_integrity(bio->bi_bdev);
	BUG_ON(bi == NULL);
	BUG_ON(bip->bip_vcnt != 1);

	nr_sectors = bio_integrity_hw_sectors(bi, sectors);

	bp->bio1.bi_integrity = &bp->bip1;
	bp->bio2.bi_integrity = &bp->bip2;

	bp->iv1 = bip->bip_vec[0];
	bp->iv2 = bip->bip_vec[0];

	bp->bip1.bip_vec[0] = bp->iv1;
	bp->bip2.bip_vec[0] = bp->iv2;

	bp->iv1.bv_len = sectors * bi->tuple_size;
	bp->iv2.bv_offset += sectors * bi->tuple_size;
	bp->iv2.bv_len -= sectors * bi->tuple_size;

	bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
	bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;

	bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
	bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
}
EXPORT_SYMBOL(bio_integrity_split);

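/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio:	New bio
 * @bio_src:	Original bio
 * @gfp_mask:	Memory allocation mask
 * @bs:		bio_set to allocate bip from
 *
 * Description: Called to allocate a bip when cloning a bio
 */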
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);

	if (bip == NULL)
		return -EIO;

	memcpy(bip->bip_vec, bip_src->bip_vec,
	       bip_src->bip_vcnt * sizeof(struct bio_vec));

	bip->bip_sector = bip_src->bip_sector;
	bip->bip_vcnt = bip_src->bip_vcnt;
	bip->bip_idx = bip_src->bip_idx;

	return 0;
}
EXPORT_SYMBOL(bio_integrity_clone);

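/**
 * bioset_integrity_create - Create integrity mempool for a bio_set
 * @bs:		bio_set to back with an integrity pool
 * @pool_size:	Number of payloads to preallocate
 *
 * Description: The pool is backed by the largest bip slab so that a
 * mempool allocation can always hold BIO_MAX_PAGES integrity vectors.
 */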
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);

	bs->bio_integrity_pool =
		mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);

	if (!bs->bio_integrity_pool)
		return -1;

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);

void bioset_integrity_free(struct bio_set *bs)
{
	if (bs->bio_integrity_pool)
		mempool_destroy(bs->bio_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);

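/*
 * Boot-time setup: create the kintegrityd workqueue used to verify
 * reads in process context, and a kmem cache for each payload size
 * in bip_slab[].
 */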
void __init bio_integrity_init(void)
{
	unsigned int i;

	kintegrityd_wq = create_workqueue("kintegrityd");
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
		unsigned int size;

		size = sizeof(struct bio_integrity_payload)
		       + bip_slab[i].nr_vecs * sizeof(struct bio_vec);

		bip_slab[i].slab =
			kmem_cache_create(bip_slab[i].name, size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}