1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/blkdev.h>
24#include <linux/mempool.h>
25#include <linux/export.h>
26#include <linux/bio.h>
27#include <linux/workqueue.h>
28#include <linux/slab.h>
29
30#define BIP_INLINE_VECS 4
31
32static struct kmem_cache *bip_slab;
33static struct workqueue_struct *kintegrityd_wq;
34
35
36
37
38
39
40
41
42
43
44
/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.  Returns the payload on
 * success, NULL on allocation failure.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned long idx = BIO_POOL_NONE;
	unsigned inline_vecs;

	if (!bs) {
		/* No bio_set: payload and all vecs come from one kmalloc */
		bip = kmalloc(sizeof(struct bio_integrity_payload) +
			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		/* Pool-backed payloads embed BIP_INLINE_VECS inline vecs */
		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIP_INLINE_VECS;
	}

	if (unlikely(!bip))
		return NULL;

	memset(bip, 0, sizeof(*bip));

	if (nr_vecs > inline_vecs) {
		/* Only reachable on the mempool path (when !bs, inline_vecs
		 * equals nr_vecs), so bs is non-NULL here and in err: below */
		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
					  bs->bvec_integrity_pool);
		if (!bip->bip_vec)
			goto err;
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
	}

	/* idx records which bvec slab the vecs came from (BIO_POOL_NONE
	 * for the inline case); bio_integrity_free() relies on it */
	bip->bip_slab = idx;
	bip->bip_bio = bio;
	bio->bi_integrity = bip;

	return bip;
err:
	mempool_free(bip, bs->bio_integrity_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_integrity_alloc);
87
88
89
90
91
92
93
94
/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 *
 * Description: Frees the integrity portion of a bio: releases the
 * metadata buffer if this payload owns it, returns the bvec array to
 * its slab when one was allocated, and frees the payload itself from
 * whichever allocator produced it (mempool vs. kmalloc).
 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct bio_set *bs = bio->bi_pool;

	/* Buffer was kmalloc'ed on the bio's behalf (see bio_integrity_prep) */
	if (bip->bip_owns_buf)
		kfree(bip->bip_buf);

	if (bs) {
		/* BIO_POOL_NONE means the payload's inline vecs were used */
		if (bip->bip_slab != BIO_POOL_NONE)
			bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_slab);

		mempool_free(bip, bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}

	bio->bi_integrity = NULL;
}
EXPORT_SYMBOL(bio_integrity_free);
116
117static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
118{
119 if (bip->bip_slab == BIO_POOL_NONE)
120 return BIP_INLINE_VECS;
121
122 return bvec_nr_vecs(bip->bip_slab);
123}
124
125
126
127
128
129
130
131
132
133
134int bio_integrity_add_page(struct bio *bio, struct page *page,
135 unsigned int len, unsigned int offset)
136{
137 struct bio_integrity_payload *bip = bio->bi_integrity;
138 struct bio_vec *iv;
139
140 if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
141 printk(KERN_ERR "%s: bip_vec full\n", __func__);
142 return 0;
143 }
144
145 iv = bip->bip_vec + bip->bip_vcnt;
146
147 iv->bv_page = page;
148 iv->bv_len = len;
149 iv->bv_offset = offset;
150 bip->bip_vcnt++;
151
152 return len;
153}
154EXPORT_SYMBOL(bio_integrity_add_page);
155
156static int bdev_integrity_enabled(struct block_device *bdev, int rw)
157{
158 struct blk_integrity *bi = bdev_get_integrity(bdev);
159
160 if (bi == NULL)
161 return 0;
162
163 if (rw == READ && bi->verify_fn != NULL &&
164 (bi->flags & INTEGRITY_FLAG_READ))
165 return 1;
166
167 if (rw == WRITE && bi->generate_fn != NULL &&
168 (bi->flags & INTEGRITY_FLAG_WRITE))
169 return 1;
170
171 return 0;
172}
173
174
175
176
177
178
179
180
181
182
183int bio_integrity_enabled(struct bio *bio)
184{
185 if (!bio_is_rw(bio))
186 return 0;
187
188
189 if (bio_integrity(bio))
190 return 0;
191
192 return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
193}
194EXPORT_SYMBOL(bio_integrity_enabled);
195
196
197
198
199
200
201
202
203
204
205
206static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
207 unsigned int sectors)
208{
209
210 if (bi->sector_size == 4096)
211 return sectors >>= 3;
212
213 return sectors;
214}
215
216static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
217 unsigned int sectors)
218{
219 return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
220}
221
222
223
224
225
226
227
228
229
/**
 * bio_integrity_tag_size - Retrieve integrity tag space
 * @bio:	bio to inspect
 *
 * Description: Returns the maximum number of tag bytes that can be
 * attached to this bio.  Filesystems can use this to determine how
 * much metadata to attach to an I/O.
 */
unsigned int bio_integrity_tag_size(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	BUG_ON(bio->bi_iter.bi_size == 0);

	/* tag_size bytes per hardware sector of data */
	return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
}
EXPORT_SYMBOL(bio_integrity_tag_size);
239
/*
 * Copy tag bytes between the caller's buffer and the integrity
 * metadata buffer.  @set non-zero stores @tag_buf into the metadata
 * (write path); zero extracts it (read path).  Returns 0 on success,
 * -1 when the profile has no tag space or @len would overrun the
 * remaining integrity buffer.
 */
static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
			     int set)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned int nr_sectors;

	BUG_ON(bip->bip_buf == NULL);

	if (bi->tag_size == 0)
		return -1;

	/* Hardware sectors needed to hold len bytes of tag data */
	nr_sectors = bio_integrity_hw_sectors(bi,
					DIV_ROUND_UP(len, bi->tag_size));

	if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
		printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
		       nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
		return -1;
	}

	if (set)
		bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
	else
		bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);

	return 0;
}
268
269
270
271
272
273
274
275
276
277
278
279
/**
 * bio_integrity_set_tag - Attach a tag buffer to a bio
 * @bio:	bio to attach buffer to
 * @tag_buf:	Pointer to a buffer containing tag data
 * @len:	Length of the tag buffer
 *
 * Description: Copies @tag_buf into the bio's integrity metadata.
 * Write-direction bios only.
 */
int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != WRITE);

	return bio_integrity_tag(bio, tag_buf, len, 1);
}
EXPORT_SYMBOL(bio_integrity_set_tag);
287
288
289
290
291
292
293
294
295
296
297
/**
 * bio_integrity_get_tag - Retrieve a tag buffer from a bio
 * @bio:	bio to retrieve buffer from
 * @tag_buf:	Pointer to a buffer for the tag data
 * @len:	Length of the target buffer
 *
 * Description: Copies tag data out of the bio's integrity metadata
 * into @tag_buf.  Read-direction bios only.
 */
int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
	BUG_ON(bio_data_dir(bio) != READ);

	return bio_integrity_tag(bio, tag_buf, len, 0);
}
EXPORT_SYMBOL(bio_integrity_get_tag);
305
306
307
308
309
310
/*
 * Walk the bio's data segments and either generate (@operate != 0) or
 * verify (@operate == 0) protection information against bip_buf.
 * Returns 0 on success; on verification, the first non-zero value
 * returned by the profile's verify_fn.
 */
static int bio_integrity_generate_verify(struct bio *bio, int operate)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	struct blk_integrity_exchg bix;
	struct bio_vec *bv;
	sector_t sector;
	unsigned int sectors, ret = 0, i;
	void *prot_buf = bio->bi_integrity->bip_buf;

	/* Generation starts at the bio's current sector; verification
	 * uses the start sector saved in the payload at prep time */
	if (operate)
		sector = bio->bi_iter.bi_sector;
	else
		sector = bio->bi_integrity->bip_iter.bi_sector;

	bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
	bix.sector_size = bi->sector_size;

	bio_for_each_segment_all(bv, bio, i) {
		void *kaddr = kmap_atomic(bv->bv_page);
		bix.data_buf = kaddr + bv->bv_offset;
		bix.data_size = bv->bv_len;
		bix.prot_buf = prot_buf;
		bix.sector = sector;

		if (operate)
			bi->generate_fn(&bix);
		else {
			ret = bi->verify_fn(&bix);
			if (ret) {
				/* Unmap before bailing on first bad segment */
				kunmap_atomic(kaddr);
				return ret;
			}
		}

		/* Step data sector and protection buffer past this segment
		 * (assumes bv_len is a multiple of the hardware sector size
		 * -- TODO confirm against callers) */
		sectors = bv->bv_len / bi->sector_size;
		sector += sectors;
		prot_buf += sectors * bi->tuple_size;

		kunmap_atomic(kaddr);
	}
	return ret;
}
353
354
355
356
357
358
359
360
361
362
/**
 * bio_integrity_generate - Generate integrity metadata for a bio
 * @bio:	bio to generate integrity metadata for
 *
 * Description: Generates integrity metadata for a bio by calling the
 * block device's generation callback for each data segment.
 */
static void bio_integrity_generate(struct bio *bio)
{
	bio_integrity_generate_verify(bio, 1);
}
367
368static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
369{
370 if (bi)
371 return bi->tuple_size;
372
373 return 0;
374}
375
376
377
378
379
380
381
382
383
384
385
386
/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description: Allocates a buffer for integrity metadata, maps the
 * pages and attaches them to the bio.  The bio must have its data
 * direction, target device and start sector set prior to calling.
 * For WRITEs the integrity metadata is generated immediately; for
 * READs the end_io handler is swapped so the metadata can be verified
 * on completion.  Returns 0 on success, negative errno on failure.
 */
int bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi;
	struct request_queue *q;
	void *buf;
	unsigned long start, end;
	unsigned int len, nr_pages;
	unsigned int bytes, offset, i;
	unsigned int sectors;

	bi = bdev_get_integrity(bio->bi_bdev);
	q = bdev_get_queue(bio->bi_bdev);
	BUG_ON(bi == NULL);
	BUG_ON(bio_integrity(bio));

	sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));

	/* Allocate kernel buffer for protection data */
	len = sectors * blk_integrity_tuple_size(bi);
	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
	if (unlikely(buf == NULL)) {
		printk(KERN_ERR "could not allocate integrity buffer\n");
		return -ENOMEM;
	}

	/* Number of pages the (possibly unaligned) buffer spans */
	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ((unsigned long) buf) >> PAGE_SHIFT;
	nr_pages = end - start;

	/* Allocate bio integrity payload and integrity vectors */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
	if (unlikely(bip == NULL)) {
		printk(KERN_ERR "could not allocate data integrity bioset\n");
		kfree(buf);
		return -EIO;
	}

	bip->bip_owns_buf = 1;	/* bio_integrity_free() will kfree(buf) */
	bip->bip_buf = buf;
	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	/* Map the buffer page by page into the integrity vectors */
	offset = offset_in_page(buf);
	for (i = 0 ; i < nr_pages ; i++) {
		int ret;
		bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		ret = bio_integrity_add_page(bio, virt_to_page(buf),
					     bytes, offset);

		/* NOTE(review): ret == 0 means the vec table filled up, yet
		 * we return success with only part of the buffer mapped --
		 * presumably nr_pages never exceeds the allocated vecs;
		 * confirm before relying on this path */
		if (ret == 0)
			return 0;

		if (ret < bytes)
			break;

		buf += bytes;
		len -= bytes;
		offset = 0;
	}

	/* Install custom end_io so READ completions get verified */
	if (bio_data_dir(bio) == READ) {
		bip->bip_end_io = bio->bi_end_io;
		bio->bi_end_io = bio_integrity_endio;
	}

	/* Auto-generate integrity metadata for writes */
	if (bio_data_dir(bio) == WRITE)
		bio_integrity_generate(bio);

	return 0;
}
EXPORT_SYMBOL(bio_integrity_prep);
469
470
471
472
473
474
475
476
477
/**
 * bio_integrity_verify - Verify integrity metadata for a bio
 * @bio:	bio to verify
 *
 * Description: Compares the data in the bio's segments against the
 * integrity metadata returned by the device, via the profile's
 * verify callback.  Returns 0 on success or the verify_fn's error.
 */
static int bio_integrity_verify(struct bio *bio)
{
	return bio_integrity_generate_verify(bio, 0);
}
482
483
484
485
486
487
488
489
490
/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in the bio being verified
 *
 * Description: Workqueue function that completes a READ request by
 * verifying the transferred integrity metadata and then invoking the
 * original bio end_io handler.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	int error;

	error = bio_integrity_verify(bio);

	/* Restore the original completion handler and finish the bio */
	bio->bi_end_io = bip->bip_end_io;
	bio_endio_nodec(bio, error);
}
504
505
506
507
508
509
510
511
512
513
514
515
516
/**
 * bio_integrity_endio - Integrity I/O completion function
 * @bio:	Protected bio
 * @error:	Completion status
 *
 * Description: I/O completion normally runs in interrupt context, but
 * verifying integrity metadata is time-consuming and must run in
 * process context.  This handler defers verification to the
 * kintegrityd workqueue.
 */
void bio_integrity_endio(struct bio *bio, int error)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;

	BUG_ON(bip->bip_bio != bio);

	/* On I/O error there is no point verifying the metadata:
	 * restore the original end_io handler and complete directly */
	if (error) {
		bio->bi_end_io = bip->bip_end_io;
		bio_endio(bio, error);

		return;
	}

	INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
	queue_work(kintegrityd_wq, &bip->bip_work);
}
EXPORT_SYMBOL(bio_integrity_endio);
538
539
540
541
542
543
544
545
546
547
/**
 * bio_integrity_advance - Advance the integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: Converts the number of completed data bytes into the
 * corresponding number of integrity bytes and advances the payload's
 * iterator by that amount.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
EXPORT_SYMBOL(bio_integrity_advance);
557
558
559
560
561
562
563
564
565
566
567
568
/**
 * bio_integrity_trim - Trim the integrity vector
 * @bio:	bio whose integrity vector to update
 * @offset:	offset to first data sector, in 512-byte sectors
 * @sectors:	number of data sectors to keep
 *
 * Description: Used to trim the integrity vector of a cloned bio.
 * Advances the iterator past @offset data sectors and truncates its
 * length to cover exactly @sectors data sectors.
 */
void bio_integrity_trim(struct bio *bio, unsigned int offset,
			unsigned int sectors)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	bio_integrity_advance(bio, offset << 9);
	bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
}
EXPORT_SYMBOL(bio_integrity_trim);
579
580
581
582
583
584
585
586
587
588int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
589 gfp_t gfp_mask)
590{
591 struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
592 struct bio_integrity_payload *bip;
593
594 BUG_ON(bip_src == NULL);
595
596 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
597
598 if (bip == NULL)
599 return -EIO;
600
601 memcpy(bip->bip_vec, bip_src->bip_vec,
602 bip_src->bip_vcnt * sizeof(struct bio_vec));
603
604 bip->bip_vcnt = bip_src->bip_vcnt;
605 bip->bip_iter = bip_src->bip_iter;
606
607 return 0;
608}
609EXPORT_SYMBOL(bio_integrity_clone);
610
611int bioset_integrity_create(struct bio_set *bs, int pool_size)
612{
613 if (bs->bio_integrity_pool)
614 return 0;
615
616 bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
617 if (!bs->bio_integrity_pool)
618 return -1;
619
620 bs->bvec_integrity_pool = biovec_create_pool(pool_size);
621 if (!bs->bvec_integrity_pool) {
622 mempool_destroy(bs->bio_integrity_pool);
623 return -1;
624 }
625
626 return 0;
627}
628EXPORT_SYMBOL(bioset_integrity_create);
629
630void bioset_integrity_free(struct bio_set *bs)
631{
632 if (bs->bio_integrity_pool)
633 mempool_destroy(bs->bio_integrity_pool);
634
635 if (bs->bvec_integrity_pool)
636 mempool_destroy(bs->bvec_integrity_pool);
637}
638EXPORT_SYMBOL(bioset_integrity_free);
639
/* One-time init: create the verification workqueue and the payload
 * slab.  Panics on failure, as the block layer cannot operate with
 * integrity support half-initialized. */
void __init bio_integrity_init(void)
{
	/* kintegrityd won't block much but may burn CPU cycles:
	 * high-priority, CPU-intensive workqueue, max concurrency 1 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	/* Payload slab includes room for BIP_INLINE_VECS inline vecs,
	 * matching the mempool path in bio_integrity_alloc() */
	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIP_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	if (!bip_slab)
		panic("Failed to create slab\n");
}
658