1
2
3
4
5
6
7
8
9
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/key.h>
16#include <linux/bio.h>
17#include <linux/blkdev.h>
18#include <linux/mempool.h>
19#include <linux/slab.h>
20#include <linux/crypto.h>
21#include <linux/workqueue.h>
22#include <linux/kthread.h>
23#include <linux/backing-dev.h>
24#include <linux/atomic.h>
25#include <linux/scatterlist.h>
26#include <linux/rbtree.h>
27#include <linux/ctype.h>
28#include <asm/page.h>
29#include <asm/unaligned.h>
30#include <crypto/hash.h>
31#include <crypto/md5.h>
32#include <crypto/algapi.h>
33#include <crypto/skcipher.h>
34#include <crypto/aead.h>
35#include <crypto/authenc.h>
36#include <linux/rtnetlink.h>
37#include <linux/key-type.h>
38#include <keys/user-type.h>
39#include <keys/encrypted-type.h>
40#include <keys/trusted-type.h>
41
42#include <linux/device-mapper.h>
43
44#define DM_MSG_PREFIX "crypt"
45
46
47
48
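/*
 * context holding the current state of a multi-part conversion
 */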
49struct convert_context {
50 struct completion restart;
51 struct bio *bio_in;
52 struct bio *bio_out;
53 struct bvec_iter iter_in;
54 struct bvec_iter iter_out;
55 u64 cc_sector;
56 atomic_t cc_pending;
57 union {
58 struct skcipher_request *req;
59 struct aead_request *req_aead;
60 } r;
61
62};
63
64
65
66
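/*
 * per bio private data
 */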
67struct dm_crypt_io {
68 struct crypt_config *cc;
69 struct bio *base_bio;
70 u8 *integrity_metadata;
71 bool integrity_metadata_from_pool;
72 struct work_struct work;
73 struct tasklet_struct tasklet;
74
75 struct convert_context ctx;
76
77 atomic_t io_pending;
78 blk_status_t error;
79 sector_t sector;
80
81 struct rb_node rb_node;
82} CRYPTO_MINALIGN_ATTR;
83
84struct dm_crypt_request {
85 struct convert_context *ctx;
86 struct scatterlist sg_in[4];
87 struct scatterlist sg_out[4];
88 u64 iv_sector;
89};
90
91struct crypt_config;
92
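/*
 * Per-IV-mode operations: ctr/dtr manage mode-specific state for the
 * mapping, init/wipe load and clear key-derived material, generator
 * produces the IV for one request, and post (if set) post-processes the
 * data after the cipher has run.
 */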
93struct crypt_iv_operations {
94 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
95 const char *opts);
96 void (*dtr)(struct crypt_config *cc);
97 int (*init)(struct crypt_config *cc);
98 int (*wipe)(struct crypt_config *cc);
99 int (*generator)(struct crypt_config *cc, u8 *iv,
100 struct dm_crypt_request *dmreq);
101 int (*post)(struct crypt_config *cc, u8 *iv,
102 struct dm_crypt_request *dmreq);
103};
104
105struct iv_benbi_private {
106 int shift;
107};
108
109#define LMK_SEED_SIZE 64
110struct iv_lmk_private {
111 struct crypto_shash *hash_tfm;
112 u8 *seed;
113};
114
115#define TCW_WHITENING_SIZE 16
116struct iv_tcw_private {
117 struct crypto_shash *crc32_tfm;
118 u8 *iv_seed;
119 u8 *whitening;
120};
121
122#define ELEPHANT_MAX_KEY_SIZE 32
123struct iv_elephant_private {
124 struct crypto_skcipher *tfm;
125};
126
127
128
129
130
131enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
132 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
133 DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
134 DM_CRYPT_WRITE_INLINE };
135
136enum cipher_flags {
137 CRYPT_MODE_INTEGRITY_AEAD,
138 CRYPT_IV_LARGE_SECTORS,
139 CRYPT_ENCRYPT_PREPROCESS,
140};
141
142
143
144
145struct crypt_config {
146 struct dm_dev *dev;
147 sector_t start;
148
149 struct percpu_counter n_allocated_pages;
150
151 struct workqueue_struct *io_queue;
152 struct workqueue_struct *crypt_queue;
153
154 spinlock_t write_thread_lock;
155 struct task_struct *write_thread;
156 struct rb_root write_tree;
157
158 char *cipher_string;
159 char *cipher_auth;
160 char *key_string;
161
162 const struct crypt_iv_operations *iv_gen_ops;
163 union {
164 struct iv_benbi_private benbi;
165 struct iv_lmk_private lmk;
166 struct iv_tcw_private tcw;
167 struct iv_elephant_private elephant;
168 } iv_gen_private;
169 u64 iv_offset;
170 unsigned int iv_size;
171 unsigned short int sector_size;
172 unsigned char sector_shift;
173
174 union {
175 struct crypto_skcipher **tfms;
176 struct crypto_aead **tfms_aead;
177 } cipher_tfm;
178 unsigned tfms_count;
179 unsigned long cipher_flags;
180
181
182
183
184
185
186
187
188
189
190
191
192
193
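	/*
	 * Layout of each crypto request allocated from req_pool (see
	 * dmreq_of_req() and iv_of_dmreq() below):
	 *
	 *   struct skcipher_request / struct aead_request (+ tfm context)
	 *   padding to dmreq_start
	 *   struct dm_crypt_request
	 *   padding to the cipher alignmask
	 *   IV, original IV, original sector (__le64), tag offset
	 */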
194 unsigned int dmreq_start;
195
196 unsigned int per_bio_data_size;
197
198 unsigned long flags;
199 unsigned int key_size;
200 unsigned int key_parts;
201 unsigned int key_extra_size;
202 unsigned int key_mac_size;
203
204 unsigned int integrity_tag_size;
205 unsigned int integrity_iv_size;
206 unsigned int on_disk_tag_size;
207
208
209
210
211
212 unsigned tag_pool_max_sectors;
213 mempool_t tag_pool;
214 mempool_t req_pool;
215 mempool_t page_pool;
216
217 struct bio_set bs;
218 struct mutex bio_alloc_lock;
219
220 u8 *authenc_key;
221 u8 key[];
222};
223
224#define MIN_IOS 64
225#define MAX_TAG_SIZE 480
226#define POOL_ENTRY_SIZE 512
227
228static DEFINE_SPINLOCK(dm_crypt_clients_lock);
229static unsigned dm_crypt_clients_n = 0;
230static volatile unsigned long dm_crypt_pages_per_client;
231#define DM_CRYPT_MEMORY_PERCENT 2
232#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
233
234static void clone_init(struct dm_crypt_io *, struct bio *);
235static void kcryptd_queue_crypt(struct dm_crypt_io *io);
236static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
237 struct scatterlist *sg);
238
239static bool crypt_integrity_aead(struct crypt_config *cc);
240
241
242
243
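/*
 * Use this to access cipher attributes that are independent of the key.
 */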
244static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
245{
246 return cc->cipher_tfm.tfms[0];
247}
248
249static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
250{
251 return cc->cipher_tfm.tfms_aead[0];
252}
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
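/*
 * Different IV generation algorithms:
 *
 * plain: the IV is the 32-bit little-endian version of the sector number,
 *        padded with zeros if necessary.
 *
 * plain64: the IV is the 64-bit little-endian version of the sector number,
 *        padded with zeros if necessary.
 *
 * plain64be: the IV is the 64-bit big-endian version of the sector number,
 *        padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector"; the sector number is
 *        encrypted with a salt-derived key.  The encryption is handled by
 *        the essiv template in the crypto API, so only the plain sector
 *        number is passed here.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1.
 *
 * null: the IV is always zero (compatibility with existing devices only).
 *
 * lmk: compatible with the block chaining mode of the Loop-AES block
 *      device encryption system.
 *
 * tcw: compatible with the block chaining mode used by TrueCrypt prior to
 *      version 4.1: an IV derived from the sector number and key material,
 *      plus additional whitening of the data.
 *
 * random: a fresh random IV per sector; only usable for writes together
 *      with integrity metadata that stores the IV.
 *
 * eboiv: encrypted byte-offset IV (BitLocker CBC mode); the little-endian
 *      byte offset of the sector is encrypted with the bulk cipher using a
 *      zero IV.
 *
 * elephant: eboiv extended with the Elephant diffuser (BitLocker CBC mode
 *      with diffuser).
 */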
310static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
311 struct dm_crypt_request *dmreq)
312{
313 memset(iv, 0, cc->iv_size);
314 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
315
316 return 0;
317}
318
319static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
320 struct dm_crypt_request *dmreq)
321{
322 memset(iv, 0, cc->iv_size);
323 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
324
325 return 0;
326}
327
328static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
329 struct dm_crypt_request *dmreq)
330{
331 memset(iv, 0, cc->iv_size);
332
333 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
334
335 return 0;
336}
337
338static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
339 struct dm_crypt_request *dmreq)
340{
341
342
343
344
345 memset(iv, 0, cc->iv_size);
346 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
347
348 return 0;
349}
350
351static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
352 const char *opts)
353{
354 unsigned bs;
355 int log;
356
357 if (crypt_integrity_aead(cc))
358 bs = crypto_aead_blocksize(any_tfm_aead(cc));
359 else
360 bs = crypto_skcipher_blocksize(any_tfm(cc));
361 log = ilog2(bs);
362
363
364
365
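	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; a 512-byte sector holds
	 * (512 / bs) cipher blocks.  E.g. a 16-byte block size gives
	 * log = 4 and shift = 9 - 4 = 5, i.e. 32 blocks per sector.
	 */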
366 if (1 << log != bs) {
367 ti->error = "cypher blocksize is not a power of 2";
368 return -EINVAL;
369 }
370
371 if (log > 9) {
372 ti->error = "cypher blocksize is > 512";
373 return -EINVAL;
374 }
375
376 cc->iv_gen_private.benbi.shift = 9 - log;
377
378 return 0;
379}
380
381static void crypt_iv_benbi_dtr(struct crypt_config *cc)
382{
383}
384
385static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
386 struct dm_crypt_request *dmreq)
387{
388 __be64 val;
389
390 memset(iv, 0, cc->iv_size - sizeof(u64));
391
392 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
393 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
394
395 return 0;
396}
397
398static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
399 struct dm_crypt_request *dmreq)
400{
401 memset(iv, 0, cc->iv_size);
402
403 return 0;
404}
405
406static void crypt_iv_lmk_dtr(struct crypt_config *cc)
407{
408 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
409
410 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
411 crypto_free_shash(lmk->hash_tfm);
412 lmk->hash_tfm = NULL;
413
414 kfree_sensitive(lmk->seed);
415 lmk->seed = NULL;
416}
417
418static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
419 const char *opts)
420{
421 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
422
423 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
424 ti->error = "Unsupported sector size for LMK";
425 return -EINVAL;
426 }
427
428 lmk->hash_tfm = crypto_alloc_shash("md5", 0,
429 CRYPTO_ALG_ALLOCATES_MEMORY);
430 if (IS_ERR(lmk->hash_tfm)) {
431 ti->error = "Error initializing LMK hash";
432 return PTR_ERR(lmk->hash_tfm);
433 }
434
435
436 if (cc->key_parts == cc->tfms_count) {
437 lmk->seed = NULL;
438 return 0;
439 }
440
441 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
442 if (!lmk->seed) {
443 crypt_iv_lmk_dtr(cc);
444 ti->error = "Error kmallocing seed storage in LMK";
445 return -ENOMEM;
446 }
447
448 return 0;
449}
450
451static int crypt_iv_lmk_init(struct crypt_config *cc)
452{
453 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
454 int subkey_size = cc->key_size / cc->key_parts;
455
456
457 if (lmk->seed)
458 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
459 crypto_shash_digestsize(lmk->hash_tfm));
460
461 return 0;
462}
463
464static int crypt_iv_lmk_wipe(struct crypt_config *cc)
465{
466 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
467
468 if (lmk->seed)
469 memset(lmk->seed, 0, LMK_SEED_SIZE);
470
471 return 0;
472}
473
474static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
475 struct dm_crypt_request *dmreq,
476 u8 *data)
477{
478 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
479 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
480 struct md5_state md5state;
481 __le32 buf[4];
482 int i, r;
483
484 desc->tfm = lmk->hash_tfm;
485
486 r = crypto_shash_init(desc);
487 if (r)
488 return r;
489
490 if (lmk->seed) {
491 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
492 if (r)
493 return r;
494 }
495
496
497 r = crypto_shash_update(desc, data + 16, 16 * 31);
498 if (r)
499 return r;
500
501
502 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
503 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
504 buf[2] = cpu_to_le32(4024);
505 buf[3] = 0;
506 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
507 if (r)
508 return r;
509
510
511 r = crypto_shash_export(desc, &md5state);
512 if (r)
513 return r;
514
515 for (i = 0; i < MD5_HASH_WORDS; i++)
516 __cpu_to_le32s(&md5state.hash[i]);
517 memcpy(iv, &md5state.hash, cc->iv_size);
518
519 return 0;
520}
521
522static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
523 struct dm_crypt_request *dmreq)
524{
525 struct scatterlist *sg;
526 u8 *src;
527 int r = 0;
528
529 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
530 sg = crypt_get_sg_data(cc, dmreq->sg_in);
531 src = kmap_atomic(sg_page(sg));
532 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
533 kunmap_atomic(src);
534 } else
535 memset(iv, 0, cc->iv_size);
536
537 return r;
538}
539
540static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
541 struct dm_crypt_request *dmreq)
542{
543 struct scatterlist *sg;
544 u8 *dst;
545 int r;
546
547 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
548 return 0;
549
550 sg = crypt_get_sg_data(cc, dmreq->sg_out);
551 dst = kmap_atomic(sg_page(sg));
552 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
553
554
555 if (!r)
556 crypto_xor(dst + sg->offset, iv, cc->iv_size);
557
558 kunmap_atomic(dst);
559 return r;
560}
561
562static void crypt_iv_tcw_dtr(struct crypt_config *cc)
563{
564 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
565
566 kfree_sensitive(tcw->iv_seed);
567 tcw->iv_seed = NULL;
568 kfree_sensitive(tcw->whitening);
569 tcw->whitening = NULL;
570
571 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
572 crypto_free_shash(tcw->crc32_tfm);
573 tcw->crc32_tfm = NULL;
574}
575
576static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
577 const char *opts)
578{
579 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
580
581 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
582 ti->error = "Unsupported sector size for TCW";
583 return -EINVAL;
584 }
585
586 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
587 ti->error = "Wrong key size for TCW";
588 return -EINVAL;
589 }
590
591 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
592 CRYPTO_ALG_ALLOCATES_MEMORY);
593 if (IS_ERR(tcw->crc32_tfm)) {
594 ti->error = "Error initializing CRC32 in TCW";
595 return PTR_ERR(tcw->crc32_tfm);
596 }
597
598 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
599 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
600 if (!tcw->iv_seed || !tcw->whitening) {
601 crypt_iv_tcw_dtr(cc);
602 ti->error = "Error allocating seed storage in TCW";
603 return -ENOMEM;
604 }
605
606 return 0;
607}
608
609static int crypt_iv_tcw_init(struct crypt_config *cc)
610{
611 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
612 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
613
614 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
615 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
616 TCW_WHITENING_SIZE);
617
618 return 0;
619}
620
621static int crypt_iv_tcw_wipe(struct crypt_config *cc)
622{
623 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
624
625 memset(tcw->iv_seed, 0, cc->iv_size);
626 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
627
628 return 0;
629}
630
631static int crypt_iv_tcw_whitening(struct crypt_config *cc,
632 struct dm_crypt_request *dmreq,
633 u8 *data)
634{
635 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
636 __le64 sector = cpu_to_le64(dmreq->iv_sector);
637 u8 buf[TCW_WHITENING_SIZE];
638 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
639 int i, r;
640
641
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
644
645
646 desc->tfm = tcw->crc32_tfm;
647 for (i = 0; i < 4; i++) {
648 r = crypto_shash_init(desc);
649 if (r)
650 goto out;
651 r = crypto_shash_update(desc, &buf[i * 4], 4);
652 if (r)
653 goto out;
654 r = crypto_shash_final(desc, &buf[i * 4]);
655 if (r)
656 goto out;
657 }
658 crypto_xor(&buf[0], &buf[12], 4);
659 crypto_xor(&buf[4], &buf[8], 4);
660
661
662 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
663 crypto_xor(data + i * 8, buf, 8);
664out:
665 memzero_explicit(buf, sizeof(buf));
666 return r;
667}
668
669static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
670 struct dm_crypt_request *dmreq)
671{
672 struct scatterlist *sg;
673 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
674 __le64 sector = cpu_to_le64(dmreq->iv_sector);
675 u8 *src;
676 int r = 0;
677
678
679 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
680 sg = crypt_get_sg_data(cc, dmreq->sg_in);
681 src = kmap_atomic(sg_page(sg));
682 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
683 kunmap_atomic(src);
684 }
685
686
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);
691
692 return r;
693}
694
695static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
696 struct dm_crypt_request *dmreq)
697{
698 struct scatterlist *sg;
699 u8 *dst;
700 int r;
701
702 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
703 return 0;
704
705
706 sg = crypt_get_sg_data(cc, dmreq->sg_out);
707 dst = kmap_atomic(sg_page(sg));
708 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
709 kunmap_atomic(dst);
710
711 return r;
712}
713
714static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
715 struct dm_crypt_request *dmreq)
716{
717
718 get_random_bytes(iv, cc->iv_size);
719 return 0;
720}
721
722static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
723 const char *opts)
724{
725 if (crypt_integrity_aead(cc)) {
726 ti->error = "AEAD transforms not supported for EBOIV";
727 return -EINVAL;
728 }
729
730 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
731 ti->error = "Block size of EBOIV cipher does "
732 "not match IV size of block cipher";
733 return -EINVAL;
734 }
735
736 return 0;
737}
738
739static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
740 struct dm_crypt_request *dmreq)
741{
742 u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
743 struct skcipher_request *req;
744 struct scatterlist src, dst;
745 DECLARE_CRYPTO_WAIT(wait);
746 int err;
747
748 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
749 if (!req)
750 return -ENOMEM;
751
752 memset(buf, 0, cc->iv_size);
753 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
754
755 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
756 sg_init_one(&dst, iv, cc->iv_size);
757 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
758 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
759 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
760 skcipher_request_free(req);
761
762 return err;
763}
764
765static void crypt_iv_elephant_dtr(struct crypt_config *cc)
766{
767 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
768
769 crypto_free_skcipher(elephant->tfm);
770 elephant->tfm = NULL;
771}
772
773static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
774 const char *opts)
775{
776 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
777 int r;
778
779 elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
780 CRYPTO_ALG_ALLOCATES_MEMORY);
781 if (IS_ERR(elephant->tfm)) {
782 r = PTR_ERR(elephant->tfm);
783 elephant->tfm = NULL;
784 return r;
785 }
786
787 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
788 if (r)
789 crypt_iv_elephant_dtr(cc);
790 return r;
791}
792
793static void diffuser_disk_to_cpu(u32 *d, size_t n)
794{
795#ifndef __LITTLE_ENDIAN
796 int i;
797
798 for (i = 0; i < n; i++)
799 d[i] = le32_to_cpu((__le32)d[i]);
800#endif
801}
802
803static void diffuser_cpu_to_disk(__le32 *d, size_t n)
804{
805#ifndef __LITTLE_ENDIAN
806 int i;
807
808 for (i = 0; i < n; i++)
809 d[i] = cpu_to_le32((u32)d[i]);
810#endif
811}
812
813static void diffuser_a_decrypt(u32 *d, size_t n)
814{
815 int i, i1, i2, i3;
816
817 for (i = 0; i < 5; i++) {
818 i1 = 0;
819 i2 = n - 2;
820 i3 = n - 5;
821
822 while (i1 < (n - 1)) {
823 d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
824 i1++; i2++; i3++;
825
826 if (i3 >= n)
827 i3 -= n;
828
829 d[i1] += d[i2] ^ d[i3];
830 i1++; i2++; i3++;
831
832 if (i2 >= n)
833 i2 -= n;
834
835 d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
836 i1++; i2++; i3++;
837
838 d[i1] += d[i2] ^ d[i3];
839 i1++; i2++; i3++;
840 }
841 }
842}
843
844static void diffuser_a_encrypt(u32 *d, size_t n)
845{
846 int i, i1, i2, i3;
847
848 for (i = 0; i < 5; i++) {
849 i1 = n - 1;
850 i2 = n - 2 - 1;
851 i3 = n - 5 - 1;
852
853 while (i1 > 0) {
854 d[i1] -= d[i2] ^ d[i3];
855 i1--; i2--; i3--;
856
857 d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
858 i1--; i2--; i3--;
859
860 if (i2 < 0)
861 i2 += n;
862
863 d[i1] -= d[i2] ^ d[i3];
864 i1--; i2--; i3--;
865
866 if (i3 < 0)
867 i3 += n;
868
869 d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
870 i1--; i2--; i3--;
871 }
872 }
873}
874
875static void diffuser_b_decrypt(u32 *d, size_t n)
876{
877 int i, i1, i2, i3;
878
879 for (i = 0; i < 3; i++) {
880 i1 = 0;
881 i2 = 2;
882 i3 = 5;
883
884 while (i1 < (n - 1)) {
885 d[i1] += d[i2] ^ d[i3];
886 i1++; i2++; i3++;
887
888 d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
889 i1++; i2++; i3++;
890
891 if (i2 >= n)
892 i2 -= n;
893
894 d[i1] += d[i2] ^ d[i3];
895 i1++; i2++; i3++;
896
897 if (i3 >= n)
898 i3 -= n;
899
900 d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
901 i1++; i2++; i3++;
902 }
903 }
904}
905
906static void diffuser_b_encrypt(u32 *d, size_t n)
907{
908 int i, i1, i2, i3;
909
910 for (i = 0; i < 3; i++) {
911 i1 = n - 1;
912 i2 = 2 - 1;
913 i3 = 5 - 1;
914
915 while (i1 > 0) {
916 d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
917 i1--; i2--; i3--;
918
919 if (i3 < 0)
920 i3 += n;
921
922 d[i1] -= d[i2] ^ d[i3];
923 i1--; i2--; i3--;
924
925 if (i2 < 0)
926 i2 += n;
927
928 d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
929 i1--; i2--; i3--;
930
931 d[i1] -= d[i2] ^ d[i3];
932 i1--; i2--; i3--;
933 }
934 }
935}
936
937static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
938{
939 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
940 u8 *es, *ks, *data, *data2, *data_offset;
941 struct skcipher_request *req;
942 struct scatterlist *sg, *sg2, src, dst;
943 DECLARE_CRYPTO_WAIT(wait);
944 int i, r;
945
946 req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
947 es = kzalloc(16, GFP_NOIO);
948 ks = kzalloc(32, GFP_NOIO);
949
950 if (!req || !es || !ks) {
951 r = -ENOMEM;
952 goto out;
953 }
954
955 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
956
957
958 sg_init_one(&src, es, 16);
959 sg_init_one(&dst, ks, 16);
960 skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
961 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
962 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
963 if (r)
964 goto out;
965
966
967 es[15] = 0x80;
968 sg_init_one(&dst, &ks[16], 16);
969 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
970 if (r)
971 goto out;
972
973 sg = crypt_get_sg_data(cc, dmreq->sg_out);
974 data = kmap_atomic(sg_page(sg));
975 data_offset = data + sg->offset;
976
977
978 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
979 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
980 data2 = kmap_atomic(sg_page(sg2));
981 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
982 kunmap_atomic(data2);
983 }
984
985 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
986 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
987 diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
988 diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
989 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
990 }
991
992 for (i = 0; i < (cc->sector_size / 32); i++)
993 crypto_xor(data_offset + i * 32, ks, 32);
994
995 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
996 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
997 diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
998 diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
999 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
1000 }
1001
1002 kunmap_atomic(data);
1003out:
1004 kfree_sensitive(ks);
1005 kfree_sensitive(es);
1006 skcipher_request_free(req);
1007 return r;
1008}
1009
1010static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1011 struct dm_crypt_request *dmreq)
1012{
1013 int r;
1014
1015 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1016 r = crypt_iv_elephant(cc, dmreq);
1017 if (r)
1018 return r;
1019 }
1020
1021 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1022}
1023
1024static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1025 struct dm_crypt_request *dmreq)
1026{
1027 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1028 return crypt_iv_elephant(cc, dmreq);
1029
1030 return 0;
1031}
1032
1033static int crypt_iv_elephant_init(struct crypt_config *cc)
1034{
1035 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1036 int key_offset = cc->key_size - cc->key_extra_size;
1037
1038 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1039}
1040
1041static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1042{
1043 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1044 u8 key[ELEPHANT_MAX_KEY_SIZE];
1045
1046 memset(key, 0, cc->key_extra_size);
1047 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1048}
1049
1050static const struct crypt_iv_operations crypt_iv_plain_ops = {
1051 .generator = crypt_iv_plain_gen
1052};
1053
1054static const struct crypt_iv_operations crypt_iv_plain64_ops = {
1055 .generator = crypt_iv_plain64_gen
1056};
1057
1058static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
1059 .generator = crypt_iv_plain64be_gen
1060};
1061
1062static const struct crypt_iv_operations crypt_iv_essiv_ops = {
1063 .generator = crypt_iv_essiv_gen
1064};
1065
1066static const struct crypt_iv_operations crypt_iv_benbi_ops = {
1067 .ctr = crypt_iv_benbi_ctr,
1068 .dtr = crypt_iv_benbi_dtr,
1069 .generator = crypt_iv_benbi_gen
1070};
1071
1072static const struct crypt_iv_operations crypt_iv_null_ops = {
1073 .generator = crypt_iv_null_gen
1074};
1075
1076static const struct crypt_iv_operations crypt_iv_lmk_ops = {
1077 .ctr = crypt_iv_lmk_ctr,
1078 .dtr = crypt_iv_lmk_dtr,
1079 .init = crypt_iv_lmk_init,
1080 .wipe = crypt_iv_lmk_wipe,
1081 .generator = crypt_iv_lmk_gen,
1082 .post = crypt_iv_lmk_post
1083};
1084
1085static const struct crypt_iv_operations crypt_iv_tcw_ops = {
1086 .ctr = crypt_iv_tcw_ctr,
1087 .dtr = crypt_iv_tcw_dtr,
1088 .init = crypt_iv_tcw_init,
1089 .wipe = crypt_iv_tcw_wipe,
1090 .generator = crypt_iv_tcw_gen,
1091 .post = crypt_iv_tcw_post
1092};
1093
1094static const struct crypt_iv_operations crypt_iv_random_ops = {
1095 .generator = crypt_iv_random_gen
1096};
1097
1098static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
1099 .ctr = crypt_iv_eboiv_ctr,
1100 .generator = crypt_iv_eboiv_gen
1101};
1102
1103static const struct crypt_iv_operations crypt_iv_elephant_ops = {
1104 .ctr = crypt_iv_elephant_ctr,
1105 .dtr = crypt_iv_elephant_dtr,
1106 .init = crypt_iv_elephant_init,
1107 .wipe = crypt_iv_elephant_wipe,
1108 .generator = crypt_iv_elephant_gen,
1109 .post = crypt_iv_elephant_post
1110};
1111
1112
1113
1114
1115static bool crypt_integrity_aead(struct crypt_config *cc)
1116{
1117 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1118}
1119
1120static bool crypt_integrity_hmac(struct crypt_config *cc)
1121{
1122 return crypt_integrity_aead(cc) && cc->key_mac_size;
1123}
1124
1125
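/*
 * Get sg containing data: for AEAD requests the payload is the third sg
 * entry, after the authenticated sector number and IV (see the sg layout
 * in crypt_convert_block_aead()).
 */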
1126static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1127 struct scatterlist *sg)
1128{
1129 if (unlikely(crypt_integrity_aead(cc)))
1130 return &sg[2];
1131
1132 return sg;
1133}
1134
1135static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1136{
1137 struct bio_integrity_payload *bip;
1138 unsigned int tag_len;
1139 int ret;
1140
1141 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1142 return 0;
1143
1144 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1145 if (IS_ERR(bip))
1146 return PTR_ERR(bip);
1147
1148 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1149
1150 bip->bip_iter.bi_size = tag_len;
1151 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1152
1153 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
1154 tag_len, offset_in_page(io->integrity_metadata));
1155 if (unlikely(ret != tag_len))
1156 return -ENOMEM;
1157
1158 return 0;
1159}
1160
1161static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1162{
1163#ifdef CONFIG_BLK_DEV_INTEGRITY
1164 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1165 struct mapped_device *md = dm_table_get_md(ti->table);
1166
1167
1168 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
1169 ti->error = "Integrity profile not supported.";
1170 return -EINVAL;
1171 }
1172
1173 if (bi->tag_size != cc->on_disk_tag_size ||
1174 bi->tuple_size != cc->on_disk_tag_size) {
1175 ti->error = "Integrity profile tag size mismatch.";
1176 return -EINVAL;
1177 }
1178 if (1 << bi->interval_exp != cc->sector_size) {
1179 ti->error = "Integrity profile sector size mismatch.";
1180 return -EINVAL;
1181 }
1182
1183 if (crypt_integrity_aead(cc)) {
1184 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1185 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
1186 cc->integrity_tag_size, cc->integrity_iv_size);
1187
1188 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1189 ti->error = "Integrity AEAD auth tag size is not supported.";
1190 return -EINVAL;
1191 }
1192 } else if (cc->integrity_iv_size)
1193 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
1194 cc->integrity_iv_size);
1195
1196 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1197 ti->error = "Not enough space for integrity tag in the profile.";
1198 return -EINVAL;
1199 }
1200
1201 return 0;
1202#else
1203 ti->error = "Integrity profile not supported.";
1204 return -EINVAL;
1205#endif
1206}
1207
1208static void crypt_convert_init(struct crypt_config *cc,
1209 struct convert_context *ctx,
1210 struct bio *bio_out, struct bio *bio_in,
1211 sector_t sector)
1212{
1213 ctx->bio_in = bio_in;
1214 ctx->bio_out = bio_out;
1215 if (bio_in)
1216 ctx->iter_in = bio_in->bi_iter;
1217 if (bio_out)
1218 ctx->iter_out = bio_out->bi_iter;
1219 ctx->cc_sector = sector + cc->iv_offset;
1220 init_completion(&ctx->restart);
1221}
1222
1223static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1224 void *req)
1225{
1226 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1227}
1228
1229static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1230{
1231 return (void *)((char *)dmreq - cc->dmreq_start);
1232}
1233
1234static u8 *iv_of_dmreq(struct crypt_config *cc,
1235 struct dm_crypt_request *dmreq)
1236{
1237 if (crypt_integrity_aead(cc))
1238 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1239 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1240 else
1241 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1242 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1243}
1244
1245static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1246 struct dm_crypt_request *dmreq)
1247{
1248 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1249}
1250
1251static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1252 struct dm_crypt_request *dmreq)
1253{
1254 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1255 return (__le64 *) ptr;
1256}
1257
1258static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1259 struct dm_crypt_request *dmreq)
1260{
1261 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1262 cc->iv_size + sizeof(uint64_t);
1263 return (unsigned int*)ptr;
1264}
1265
1266static void *tag_from_dmreq(struct crypt_config *cc,
1267 struct dm_crypt_request *dmreq)
1268{
1269 struct convert_context *ctx = dmreq->ctx;
1270 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1271
1272 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1273 cc->on_disk_tag_size];
1274}
1275
1276static void *iv_tag_from_dmreq(struct crypt_config *cc,
1277 struct dm_crypt_request *dmreq)
1278{
1279 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1280}
1281
1282static int crypt_convert_block_aead(struct crypt_config *cc,
1283 struct convert_context *ctx,
1284 struct aead_request *req,
1285 unsigned int tag_offset)
1286{
1287 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1288 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1289 struct dm_crypt_request *dmreq;
1290 u8 *iv, *org_iv, *tag_iv, *tag;
1291 __le64 *sector;
1292 int r = 0;
1293
1294 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1295
1296
1297 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1298 return -EIO;
1299
1300 dmreq = dmreq_of_req(cc, req);
1301 dmreq->iv_sector = ctx->cc_sector;
1302 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1303 dmreq->iv_sector >>= cc->sector_shift;
1304 dmreq->ctx = ctx;
1305
1306 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1307
1308 sector = org_sector_of_dmreq(cc, dmreq);
1309 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1310
1311 iv = iv_of_dmreq(cc, dmreq);
1312 org_iv = org_iv_of_dmreq(cc, dmreq);
1313 tag = tag_from_dmreq(cc, dmreq);
1314 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1315
1316
1317
1318
1319
1320
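	/*
	 * AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */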
1321 sg_init_table(dmreq->sg_in, 4);
1322 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1323 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1324 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1325 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1326
1327 sg_init_table(dmreq->sg_out, 4);
1328 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1329 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1330 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1331 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1332
1333 if (cc->iv_gen_ops) {
1334
1335 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1336 memcpy(org_iv, tag_iv, cc->iv_size);
1337 } else {
1338 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1339 if (r < 0)
1340 return r;
1341
1342 if (cc->integrity_iv_size)
1343 memcpy(tag_iv, org_iv, cc->iv_size);
1344 }
1345
1346 memcpy(iv, org_iv, cc->iv_size);
1347 }
1348
1349 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1350 if (bio_data_dir(ctx->bio_in) == WRITE) {
1351 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1352 cc->sector_size, iv);
1353 r = crypto_aead_encrypt(req);
1354 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1355 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1356 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1357 } else {
1358 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1359 cc->sector_size + cc->integrity_tag_size, iv);
1360 r = crypto_aead_decrypt(req);
1361 }
1362
1363 if (r == -EBADMSG) {
1364 char b[BDEVNAME_SIZE];
1365 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
1366 (unsigned long long)le64_to_cpu(*sector));
1367 }
1368
1369 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1370 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1371
1372 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1373 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1374
1375 return r;
1376}
1377
1378static int crypt_convert_block_skcipher(struct crypt_config *cc,
1379 struct convert_context *ctx,
1380 struct skcipher_request *req,
1381 unsigned int tag_offset)
1382{
1383 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1384 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1385 struct scatterlist *sg_in, *sg_out;
1386 struct dm_crypt_request *dmreq;
1387 u8 *iv, *org_iv, *tag_iv;
1388 __le64 *sector;
1389 int r = 0;
1390
1391
1392 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1393 return -EIO;
1394
1395 dmreq = dmreq_of_req(cc, req);
1396 dmreq->iv_sector = ctx->cc_sector;
1397 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1398 dmreq->iv_sector >>= cc->sector_shift;
1399 dmreq->ctx = ctx;
1400
1401 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1402
1403 iv = iv_of_dmreq(cc, dmreq);
1404 org_iv = org_iv_of_dmreq(cc, dmreq);
1405 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1406
1407 sector = org_sector_of_dmreq(cc, dmreq);
1408 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1409
1410
1411 sg_in = &dmreq->sg_in[0];
1412 sg_out = &dmreq->sg_out[0];
1413
1414 sg_init_table(sg_in, 1);
1415 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1416
1417 sg_init_table(sg_out, 1);
1418 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1419
1420 if (cc->iv_gen_ops) {
1421
1422 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1423 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1424 } else {
1425 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1426 if (r < 0)
1427 return r;
1428
1429 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1430 sg_in = sg_out;
1431
1432 if (cc->integrity_iv_size)
1433 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1434 }
1435
1436 memcpy(iv, org_iv, cc->iv_size);
1437 }
1438
1439 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1440
1441 if (bio_data_dir(ctx->bio_in) == WRITE)
1442 r = crypto_skcipher_encrypt(req);
1443 else
1444 r = crypto_skcipher_decrypt(req);
1445
1446 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1447 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1448
1449 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1450 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1451
1452 return r;
1453}
1454
1455static void kcryptd_async_done(struct crypto_async_request *async_req,
1456 int error);
1457
1458static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1459 struct convert_context *ctx)
1460{
1461 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1462
1463 if (!ctx->r.req) {
1464 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1465 if (!ctx->r.req)
1466 return -ENOMEM;
1467 }
1468
1469 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1470
1471
1472
1473
1474
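	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */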
1475 skcipher_request_set_callback(ctx->r.req,
1476 CRYPTO_TFM_REQ_MAY_BACKLOG,
1477 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1478
1479 return 0;
1480}
1481
1482static int crypt_alloc_req_aead(struct crypt_config *cc,
1483 struct convert_context *ctx)
1484{
1485 if (!ctx->r.req_aead) {
1486 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1487 if (!ctx->r.req_aead)
1488 return -ENOMEM;
1489 }
1490
1491 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1492
1493
1494
1495
1496
1497 aead_request_set_callback(ctx->r.req_aead,
1498 CRYPTO_TFM_REQ_MAY_BACKLOG,
1499 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1500
1501 return 0;
1502}
1503
1504static int crypt_alloc_req(struct crypt_config *cc,
1505 struct convert_context *ctx)
1506{
1507 if (crypt_integrity_aead(cc))
1508 return crypt_alloc_req_aead(cc, ctx);
1509 else
1510 return crypt_alloc_req_skcipher(cc, ctx);
1511}
1512
1513static void crypt_free_req_skcipher(struct crypt_config *cc,
1514 struct skcipher_request *req, struct bio *base_bio)
1515{
1516 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1517
1518 if ((struct skcipher_request *)(io + 1) != req)
1519 mempool_free(req, &cc->req_pool);
1520}
1521
1522static void crypt_free_req_aead(struct crypt_config *cc,
1523 struct aead_request *req, struct bio *base_bio)
1524{
1525 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1526
1527 if ((struct aead_request *)(io + 1) != req)
1528 mempool_free(req, &cc->req_pool);
1529}
1530
1531static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1532{
1533 if (crypt_integrity_aead(cc))
1534 crypt_free_req_aead(cc, req, base_bio);
1535 else
1536 crypt_free_req_skcipher(cc, req, base_bio);
1537}
1538
1539
1540
1541
1542static blk_status_t crypt_convert(struct crypt_config *cc,
1543 struct convert_context *ctx, bool atomic, bool reset_pending)
1544{
1545 unsigned int tag_offset = 0;
1546 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1547 int r;
1548
1549
1550
1551
1552
1553
1554 if (reset_pending)
1555 atomic_set(&ctx->cc_pending, 1);
1556
1557 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1558
1559 r = crypt_alloc_req(cc, ctx);
1560 if (r) {
1561 complete(&ctx->restart);
1562 return BLK_STS_DEV_RESOURCE;
1563 }
1564
1565 atomic_inc(&ctx->cc_pending);
1566
1567 if (crypt_integrity_aead(cc))
1568 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1569 else
1570 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1571
1572 switch (r) {
1573
1574
1575
1576
1577 case -EBUSY:
1578 if (in_interrupt()) {
1579 if (try_wait_for_completion(&ctx->restart)) {
1580
1581
1582
1583
1584 } else {
1585
1586
1587
1588
1589 ctx->r.req = NULL;
1590 ctx->cc_sector += sector_step;
1591 tag_offset++;
1592 return BLK_STS_DEV_RESOURCE;
1593 }
1594 } else {
1595 wait_for_completion(&ctx->restart);
1596 }
1597 reinit_completion(&ctx->restart);
1598 fallthrough;
1599
1600
1601
1602
1603 case -EINPROGRESS:
1604 ctx->r.req = NULL;
1605 ctx->cc_sector += sector_step;
1606 tag_offset++;
1607 continue;
1608
1609
1610
1611 case 0:
1612 atomic_dec(&ctx->cc_pending);
1613 ctx->cc_sector += sector_step;
1614 tag_offset++;
1615 if (!atomic)
1616 cond_resched();
1617 continue;
1618
1619
1620
1621 case -EBADMSG:
1622 atomic_dec(&ctx->cc_pending);
1623 return BLK_STS_PROTECTION;
1624
1625
1626
1627 default:
1628 atomic_dec(&ctx->cc_pending);
1629 return BLK_STS_IOERR;
1630 }
1631 }
1632
1633 return 0;
1634}
1635
1636static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
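/*
 * Generate a new unfragmented bio with the given size.
 *
 * This function may be called concurrently.  If pages are allocated from
 * the mempool concurrently, two callers each wanting more than half the
 * pool could deadlock waiting for each other to free pages.  We avoid
 * this by first allocating with GFP_NOWAIT; if that fails, the pages are
 * released and the allocation is retried with __GFP_DIRECT_RECLAIM under
 * bio_alloc_lock, so only one caller at a time may block on the mempool.
 */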
1655static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1656{
1657 struct crypt_config *cc = io->cc;
1658 struct bio *clone;
1659 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1660 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1661 unsigned i, len, remaining_size;
1662 struct page *page;
1663
1664retry:
1665 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1666 mutex_lock(&cc->bio_alloc_lock);
1667
1668 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
1669 if (!clone)
1670 goto out;
1671
1672 clone_init(io, clone);
1673
1674 remaining_size = size;
1675
1676 for (i = 0; i < nr_iovecs; i++) {
1677 page = mempool_alloc(&cc->page_pool, gfp_mask);
1678 if (!page) {
1679 crypt_free_buffer_pages(cc, clone);
1680 bio_put(clone);
1681 gfp_mask |= __GFP_DIRECT_RECLAIM;
1682 goto retry;
1683 }
1684
1685 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1686
1687 bio_add_page(clone, page, len, 0);
1688
1689 remaining_size -= len;
1690 }
1691
1692
1693 if (dm_crypt_integrity_io_alloc(io, clone)) {
1694 crypt_free_buffer_pages(cc, clone);
1695 bio_put(clone);
1696 clone = NULL;
1697 }
1698out:
1699 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1700 mutex_unlock(&cc->bio_alloc_lock);
1701
1702 return clone;
1703}
1704
1705static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1706{
1707 struct bio_vec *bv;
1708 struct bvec_iter_all iter_all;
1709
1710 bio_for_each_segment_all(bv, clone, iter_all) {
1711 BUG_ON(!bv->bv_page);
1712 mempool_free(bv->bv_page, &cc->page_pool);
1713 }
1714}
1715
1716static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1717 struct bio *bio, sector_t sector)
1718{
1719 io->cc = cc;
1720 io->base_bio = bio;
1721 io->sector = sector;
1722 io->error = 0;
1723 io->ctx.r.req = NULL;
1724 io->integrity_metadata = NULL;
1725 io->integrity_metadata_from_pool = false;
1726 atomic_set(&io->io_pending, 0);
1727}
1728
1729static void crypt_inc_pending(struct dm_crypt_io *io)
1730{
1731 atomic_inc(&io->io_pending);
1732}
1733
1734static void kcryptd_io_bio_endio(struct work_struct *work)
1735{
1736 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1737 bio_endio(io->base_bio);
1738}
1739
1740
1741
1742
1743
1744static void crypt_dec_pending(struct dm_crypt_io *io)
1745{
1746 struct crypt_config *cc = io->cc;
1747 struct bio *base_bio = io->base_bio;
1748 blk_status_t error = io->error;
1749
1750 if (!atomic_dec_and_test(&io->io_pending))
1751 return;
1752
1753 if (io->ctx.r.req)
1754 crypt_free_req(cc, io->ctx.r.req, base_bio);
1755
1756 if (unlikely(io->integrity_metadata_from_pool))
1757 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1758 else
1759 kfree(io->integrity_metadata);
1760
1761 base_bio->bi_status = error;
1762
1763
1764
1765
1766
1767
1768
1769
1770
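	/*
	 * If we are running from our tasklet we must not call bio_endio()
	 * directly: the clone completion path in dm.c may free this
	 * struct dm_crypt_io (and with it the tasklet we are running in),
	 * so defer bio_endio() to a work item in that case.
	 */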
1771 if (tasklet_trylock(&io->tasklet)) {
1772 tasklet_unlock(&io->tasklet);
1773 bio_endio(base_bio);
1774 return;
1775 }
1776
1777 INIT_WORK(&io->work, kcryptd_io_bio_endio);
1778 queue_work(cc->io_queue, &io->work);
1779}
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
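/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */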
1798static void crypt_endio(struct bio *clone)
1799{
1800 struct dm_crypt_io *io = clone->bi_private;
1801 struct crypt_config *cc = io->cc;
1802 unsigned rw = bio_data_dir(clone);
1803 blk_status_t error;
1804
1805
1806
1807
1808 if (rw == WRITE)
1809 crypt_free_buffer_pages(cc, clone);
1810
1811 error = clone->bi_status;
1812 bio_put(clone);
1813
1814 if (rw == READ && !error) {
1815 kcryptd_queue_crypt(io);
1816 return;
1817 }
1818
1819 if (unlikely(error))
1820 io->error = error;
1821
1822 crypt_dec_pending(io);
1823}
1824
1825static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1826{
1827 struct crypt_config *cc = io->cc;
1828
1829 clone->bi_private = io;
1830 clone->bi_end_io = crypt_endio;
1831 bio_set_dev(clone, cc->dev->bdev);
1832 clone->bi_opf = io->base_bio->bi_opf;
1833}
1834
1835static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1836{
1837 struct crypt_config *cc = io->cc;
1838 struct bio *clone;
1839
1840
1841
1842
1843
1844
1845
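	/*
	 * The read is submitted as a fast clone of the original bio; thanks
	 * to immutable biovecs the block layer will not modify the biovec
	 * array, so the data can be decrypted in place once the read
	 * completes (see crypt_endio()).
	 */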
1846 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
1847 if (!clone)
1848 return 1;
1849
1850 crypt_inc_pending(io);
1851
1852 clone_init(io, clone);
1853 clone->bi_iter.bi_sector = cc->start + io->sector;
1854
1855 if (dm_crypt_integrity_io_alloc(io, clone)) {
1856 crypt_dec_pending(io);
1857 bio_put(clone);
1858 return 1;
1859 }
1860
1861 submit_bio_noacct(clone);
1862 return 0;
1863}
1864
1865static void kcryptd_io_read_work(struct work_struct *work)
1866{
1867 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1868
1869 crypt_inc_pending(io);
1870 if (kcryptd_io_read(io, GFP_NOIO))
1871 io->error = BLK_STS_RESOURCE;
1872 crypt_dec_pending(io);
1873}
1874
1875static void kcryptd_queue_read(struct dm_crypt_io *io)
1876{
1877 struct crypt_config *cc = io->cc;
1878
1879 INIT_WORK(&io->work, kcryptd_io_read_work);
1880 queue_work(cc->io_queue, &io->work);
1881}
1882
1883static void kcryptd_io_write(struct dm_crypt_io *io)
1884{
1885 struct bio *clone = io->ctx.bio_out;
1886
1887 submit_bio_noacct(clone);
1888}
1889
1890#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1891
1892static int dmcrypt_write(void *data)
1893{
1894 struct crypt_config *cc = data;
1895 struct dm_crypt_io *io;
1896
1897 while (1) {
1898 struct rb_root write_tree;
1899 struct blk_plug plug;
1900
1901 spin_lock_irq(&cc->write_thread_lock);
1902continue_locked:
1903
1904 if (!RB_EMPTY_ROOT(&cc->write_tree))
1905 goto pop_from_list;
1906
1907 set_current_state(TASK_INTERRUPTIBLE);
1908
1909 spin_unlock_irq(&cc->write_thread_lock);
1910
1911 if (unlikely(kthread_should_stop())) {
1912 set_current_state(TASK_RUNNING);
1913 break;
1914 }
1915
1916 schedule();
1917
1918 set_current_state(TASK_RUNNING);
1919 spin_lock_irq(&cc->write_thread_lock);
1920 goto continue_locked;
1921
1922pop_from_list:
1923 write_tree = cc->write_tree;
1924 cc->write_tree = RB_ROOT;
1925 spin_unlock_irq(&cc->write_thread_lock);
1926
1927 BUG_ON(rb_parent(write_tree.rb_node));
1928
1929
1930
1931
1932
1933 blk_start_plug(&plug);
1934 do {
1935 io = crypt_io_from_node(rb_first(&write_tree));
1936 rb_erase(&io->rb_node, &write_tree);
1937 kcryptd_io_write(io);
1938 } while (!RB_EMPTY_ROOT(&write_tree));
1939 blk_finish_plug(&plug);
1940 }
1941 return 0;
1942}
1943
1944static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1945{
1946 struct bio *clone = io->ctx.bio_out;
1947 struct crypt_config *cc = io->cc;
1948 unsigned long flags;
1949 sector_t sector;
1950 struct rb_node **rbp, *parent;
1951
1952 if (unlikely(io->error)) {
1953 crypt_free_buffer_pages(cc, clone);
1954 bio_put(clone);
1955 crypt_dec_pending(io);
1956 return;
1957 }
1958
1959
1960 BUG_ON(io->ctx.iter_out.bi_size);
1961
1962 clone->bi_iter.bi_sector = cc->start + io->sector;
1963
1964 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1965 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1966 submit_bio_noacct(clone);
1967 return;
1968 }
1969
1970 spin_lock_irqsave(&cc->write_thread_lock, flags);
1971 if (RB_EMPTY_ROOT(&cc->write_tree))
1972 wake_up_process(cc->write_thread);
1973 rbp = &cc->write_tree.rb_node;
1974 parent = NULL;
1975 sector = io->sector;
1976 while (*rbp) {
1977 parent = *rbp;
1978 if (sector < crypt_io_from_node(parent)->sector)
1979 rbp = &(*rbp)->rb_left;
1980 else
1981 rbp = &(*rbp)->rb_right;
1982 }
1983 rb_link_node(&io->rb_node, parent, rbp);
1984 rb_insert_color(&io->rb_node, &cc->write_tree);
1985 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1986}
1987
1988static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1989 struct convert_context *ctx)
1990
1991{
1992 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1993 return false;
1994
1995
1996
1997
1998
1999
2000 switch (bio_op(ctx->bio_in)) {
2001 case REQ_OP_WRITE:
2002 case REQ_OP_WRITE_SAME:
2003 case REQ_OP_WRITE_ZEROES:
2004 return true;
2005 default:
2006 return false;
2007 }
2008}
2009
2010static void kcryptd_crypt_write_continue(struct work_struct *work)
2011{
2012 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2013 struct crypt_config *cc = io->cc;
2014 struct convert_context *ctx = &io->ctx;
2015 int crypt_finished;
2016 sector_t sector = io->sector;
2017 blk_status_t r;
2018
2019 wait_for_completion(&ctx->restart);
2020 reinit_completion(&ctx->restart);
2021
2022 r = crypt_convert(cc, &io->ctx, true, false);
2023 if (r)
2024 io->error = r;
2025 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2026 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2027
2028 wait_for_completion(&ctx->restart);
2029 crypt_finished = 1;
2030 }
2031
2032
2033 if (crypt_finished) {
2034 kcryptd_crypt_write_io_submit(io, 0);
2035 io->sector = sector;
2036 }
2037
2038 crypt_dec_pending(io);
2039}
2040
2041static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
2042{
2043 struct crypt_config *cc = io->cc;
2044 struct convert_context *ctx = &io->ctx;
2045 struct bio *clone;
2046 int crypt_finished;
2047 sector_t sector = io->sector;
2048 blk_status_t r;
2049
2050
2051
2052
2053 crypt_inc_pending(io);
2054 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2055
2056 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
2057 if (unlikely(!clone)) {
2058 io->error = BLK_STS_IOERR;
2059 goto dec;
2060 }
2061
2062 io->ctx.bio_out = clone;
2063 io->ctx.iter_out = clone->bi_iter;
2064
2065 sector += bio_sectors(clone);
2066
2067 crypt_inc_pending(io);
2068 r = crypt_convert(cc, ctx,
2069 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2070
2071
2072
2073
2074
2075 if (r == BLK_STS_DEV_RESOURCE) {
2076 INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2077 queue_work(cc->crypt_queue, &io->work);
2078 return;
2079 }
2080 if (r)
2081 io->error = r;
2082 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
2083 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2084
2085 wait_for_completion(&ctx->restart);
2086 crypt_finished = 1;
2087 }
2088
2089
2090 if (crypt_finished) {
2091 kcryptd_crypt_write_io_submit(io, 0);
2092 io->sector = sector;
2093 }
2094
2095dec:
2096 crypt_dec_pending(io);
2097}
2098
2099static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
2100{
2101 crypt_dec_pending(io);
2102}
2103
2104static void kcryptd_crypt_read_continue(struct work_struct *work)
2105{
2106 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2107 struct crypt_config *cc = io->cc;
2108 blk_status_t r;
2109
2110 wait_for_completion(&io->ctx.restart);
2111 reinit_completion(&io->ctx.restart);
2112
2113 r = crypt_convert(cc, &io->ctx, true, false);
2114 if (r)
2115 io->error = r;
2116
2117 if (atomic_dec_and_test(&io->ctx.cc_pending))
2118 kcryptd_crypt_read_done(io);
2119
2120 crypt_dec_pending(io);
2121}
2122
2123static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
2124{
2125 struct crypt_config *cc = io->cc;
2126 blk_status_t r;
2127
2128 crypt_inc_pending(io);
2129
2130 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2131 io->sector);
2132
2133 r = crypt_convert(cc, &io->ctx,
2134 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2135
2136
2137
2138
2139 if (r == BLK_STS_DEV_RESOURCE) {
2140 INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2141 queue_work(cc->crypt_queue, &io->work);
2142 return;
2143 }
2144 if (r)
2145 io->error = r;
2146
2147 if (atomic_dec_and_test(&io->ctx.cc_pending))
2148 kcryptd_crypt_read_done(io);
2149
2150 crypt_dec_pending(io);
2151}
2152
2153static void kcryptd_async_done(struct crypto_async_request *async_req,
2154 int error)
2155{
2156 struct dm_crypt_request *dmreq = async_req->data;
2157 struct convert_context *ctx = dmreq->ctx;
2158 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2159 struct crypt_config *cc = io->cc;
2160
2161
2162
2163
2164
2165
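	/*
	 * -EINPROGRESS here means a previously backlogged request is now
	 * being processed by the crypto driver; complete ctx->restart so
	 * crypt_convert() can resume (the callback is invoked a second
	 * time when the request actually finishes).
	 */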
2166 if (error == -EINPROGRESS) {
2167 complete(&ctx->restart);
2168 return;
2169 }
2170
2171 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2172 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2173
2174 if (error == -EBADMSG) {
2175 char b[BDEVNAME_SIZE];
2176 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
2177 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
2178 io->error = BLK_STS_PROTECTION;
2179 } else if (error < 0)
2180 io->error = BLK_STS_IOERR;
2181
2182 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2183
2184 if (!atomic_dec_and_test(&ctx->cc_pending))
2185 return;
2186
2187
2188
2189
2190
2191 if (bio_data_dir(io->base_bio) == READ) {
2192 kcryptd_crypt_read_done(io);
2193 return;
2194 }
2195
2196 if (kcryptd_crypt_write_inline(cc, ctx)) {
2197 complete(&ctx->restart);
2198 return;
2199 }
2200
2201 kcryptd_crypt_write_io_submit(io, 1);
2202}
2203
2204static void kcryptd_crypt(struct work_struct *work)
2205{
2206 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2207
2208 if (bio_data_dir(io->base_bio) == READ)
2209 kcryptd_crypt_read_convert(io);
2210 else
2211 kcryptd_crypt_write_convert(io);
2212}
2213
2214static void kcryptd_crypt_tasklet(unsigned long work)
2215{
2216 kcryptd_crypt((struct work_struct *)work);
2217}
2218
2219static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2220{
2221 struct crypt_config *cc = io->cc;
2222
2223 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2224 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2225
2226
2227
2228
2229
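		/*
		 * The workqueue is bypassed here, so encryption runs in the
		 * caller's context.  The crypto API cannot be used from hard
		 * interrupt context, and some completions run with IRQs
		 * disabled, so fall back to a tasklet in those cases.
		 */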
2230 if (in_irq() || irqs_disabled()) {
2231 tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2232 tasklet_schedule(&io->tasklet);
2233 return;
2234 }
2235
2236 kcryptd_crypt(&io->work);
2237 return;
2238 }
2239
2240 INIT_WORK(&io->work, kcryptd_crypt);
2241 queue_work(cc->crypt_queue, &io->work);
2242}
2243
2244static void crypt_free_tfms_aead(struct crypt_config *cc)
2245{
2246 if (!cc->cipher_tfm.tfms_aead)
2247 return;
2248
2249 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2250 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2251 cc->cipher_tfm.tfms_aead[0] = NULL;
2252 }
2253
2254 kfree(cc->cipher_tfm.tfms_aead);
2255 cc->cipher_tfm.tfms_aead = NULL;
2256}
2257
2258static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2259{
2260 unsigned i;
2261
2262 if (!cc->cipher_tfm.tfms)
2263 return;
2264
2265 for (i = 0; i < cc->tfms_count; i++)
2266 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2267 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2268 cc->cipher_tfm.tfms[i] = NULL;
2269 }
2270
2271 kfree(cc->cipher_tfm.tfms);
2272 cc->cipher_tfm.tfms = NULL;
2273}
2274
2275static void crypt_free_tfms(struct crypt_config *cc)
2276{
2277 if (crypt_integrity_aead(cc))
2278 crypt_free_tfms_aead(cc);
2279 else
2280 crypt_free_tfms_skcipher(cc);
2281}
2282
2283static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2284{
2285 unsigned i;
2286 int err;
2287
2288 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2289 sizeof(struct crypto_skcipher *),
2290 GFP_KERNEL);
2291 if (!cc->cipher_tfm.tfms)
2292 return -ENOMEM;
2293
2294 for (i = 0; i < cc->tfms_count; i++) {
2295 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2296 CRYPTO_ALG_ALLOCATES_MEMORY);
2297 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2298 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2299 crypt_free_tfms(cc);
2300 return err;
2301 }
2302 }
2303
2304
2305
2306
2307
2308
2309 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2310 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2311 return 0;
2312}
2313
2314static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2315{
2316 int err;
2317
2318 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2319 if (!cc->cipher_tfm.tfms)
2320 return -ENOMEM;
2321
2322 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2323 CRYPTO_ALG_ALLOCATES_MEMORY);
2324 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2325 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2326 crypt_free_tfms(cc);
2327 return err;
2328 }
2329
2330 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2331 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2332 return 0;
2333}
2334
2335static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2336{
2337 if (crypt_integrity_aead(cc))
2338 return crypt_alloc_tfms_aead(cc, ciphermode);
2339 else
2340 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2341}
2342
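/*
 * For multi-key (keycount) mappings the supplied key is split into
 * tfms_count equal parts; extra material (IV seed, whitening, HMAC key)
 * is accounted for in key_extra_size and excluded from the split.
 */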
2343static unsigned crypt_subkey_size(struct crypt_config *cc)
2344{
2345 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2346}
2347
2348static unsigned crypt_authenckey_size(struct crypt_config *cc)
2349{
2350 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2351}
2352
2353 /*
2354  * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
2355  * the key must be for some reason in special format.
2356  * This function converts cc->key to this special format.
2357  */
2358static void crypt_copy_authenckey(char *p, const void *key,
2359 unsigned enckeylen, unsigned authkeylen)
2360{
2361 struct crypto_authenc_key_param *param;
2362 struct rtattr *rta;
2363
2364 rta = (struct rtattr *)p;
2365 param = RTA_DATA(rta);
2366 param->enckeylen = cpu_to_be32(enckeylen);
2367 rta->rta_len = RTA_LENGTH(sizeof(*param));
2368 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2369 p += RTA_SPACE(sizeof(*param));
2370 memcpy(p, key + enckeylen, authkeylen);
2371 p += authkeylen;
2372 memcpy(p, key, enckeylen);
2373}
2374
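/*
 * Program the (possibly split) key into every allocated transform,
 * converting it to authenc() format first when an HMAC is used.
 */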
2375static int crypt_setkey(struct crypt_config *cc)
2376{
2377 unsigned subkey_size;
2378 int err = 0, i, r;
2379
2380 /* Ignore extra keys (which are used for IV etc) */
2381 subkey_size = crypt_subkey_size(cc);
2382
2383 if (crypt_integrity_hmac(cc)) {
2384 if (subkey_size < cc->key_mac_size)
2385 return -EINVAL;
2386
2387 crypt_copy_authenckey(cc->authenc_key, cc->key,
2388 subkey_size - cc->key_mac_size,
2389 cc->key_mac_size);
2390 }
2391
2392 for (i = 0; i < cc->tfms_count; i++) {
2393 if (crypt_integrity_hmac(cc))
2394 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2395 cc->authenc_key, crypt_authenckey_size(cc));
2396 else if (crypt_integrity_aead(cc))
2397 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2398 cc->key + (i * subkey_size),
2399 subkey_size);
2400 else
2401 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2402 cc->key + (i * subkey_size),
2403 subkey_size);
2404 if (r)
2405 err = r;
2406 }
2407
2408 if (crypt_integrity_hmac(cc))
2409 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2410
2411 return err;
2412}
2413
2414#ifdef CONFIG_KEYS
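/*
 * Support for volume keys kept in the kernel keyring, referenced as
 * ":<key_size>:<logon|user|encrypted|trusted>:<key_description>".
 */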
2415
2416static bool contains_whitespace(const char *str)
2417{
2418 while (*str)
2419 if (isspace(*str++))
2420 return true;
2421 return false;
2422}
2423
2424static int set_key_user(struct crypt_config *cc, struct key *key)
2425{
2426 const struct user_key_payload *ukp;
2427
2428 ukp = user_key_payload_locked(key);
2429 if (!ukp)
2430 return -EKEYREVOKED;
2431
2432 if (cc->key_size != ukp->datalen)
2433 return -EINVAL;
2434
2435 memcpy(cc->key, ukp->data, cc->key_size);
2436
2437 return 0;
2438}
2439
2440static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2441{
2442 const struct encrypted_key_payload *ekp;
2443
2444 ekp = key->payload.data[0];
2445 if (!ekp)
2446 return -EKEYREVOKED;
2447
2448 if (cc->key_size != ekp->decrypted_datalen)
2449 return -EINVAL;
2450
2451 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2452
2453 return 0;
2454}
2455
2456static int set_key_trusted(struct crypt_config *cc, struct key *key)
2457{
2458 const struct trusted_key_payload *tkp;
2459
2460 tkp = key->payload.data[0];
2461 if (!tkp)
2462 return -EKEYREVOKED;
2463
2464 if (cc->key_size != tkp->key_len)
2465 return -EINVAL;
2466
2467 memcpy(cc->key, tkp->key, cc->key_size);
2468
2469 return 0;
2470}
2471
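/*
 * Look up "<key_type>:<key_description>" in the kernel keyring and copy
 * the key payload into cc->key before programming the transforms.
 */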
2472static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2473{
2474 char *new_key_string, *key_desc;
2475 int ret;
2476 struct key_type *type;
2477 struct key *key;
2478 int (*set_key)(struct crypt_config *cc, struct key *key);
2479
2480 /*
2481  * Reject key_string with whitespace. dm core currently lacks code for
2482  * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
2483  */
2484 if (contains_whitespace(key_string)) {
2485 DMERR("whitespace chars not allowed in key string");
2486 return -EINVAL;
2487 }
2488
2489 /* look for next ':' separating key_type from key_description */
2490 key_desc = strpbrk(key_string, ":");
2491 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2492 return -EINVAL;
2493
2494 if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2495 type = &key_type_logon;
2496 set_key = set_key_user;
2497 } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2498 type = &key_type_user;
2499 set_key = set_key_user;
2500 } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
2501 !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2502 type = &key_type_encrypted;
2503 set_key = set_key_encrypted;
2504 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
2505 !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
2506 type = &key_type_trusted;
2507 set_key = set_key_trusted;
2508 } else {
2509 return -EINVAL;
2510 }
2511
2512 new_key_string = kstrdup(key_string, GFP_KERNEL);
2513 if (!new_key_string)
2514 return -ENOMEM;
2515
2516 key = request_key(type, key_desc + 1, NULL);
2517 if (IS_ERR(key)) {
2518 kfree_sensitive(new_key_string);
2519 return PTR_ERR(key);
2520 }
2521
2522 down_read(&key->sem);
2523
2524 ret = set_key(cc, key);
2525 if (ret < 0) {
2526 up_read(&key->sem);
2527 key_put(key);
2528 kfree_sensitive(new_key_string);
2529 return ret;
2530 }
2531
2532 up_read(&key->sem);
2533 key_put(key);
2534
2535 /* clear the flag since following operations may invalidate previously valid key */
2536 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2537
2538 ret = crypt_setkey(cc);
2539
2540 if (!ret) {
2541 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2542 kfree_sensitive(cc->key_string);
2543 cc->key_string = new_key_string;
2544 } else
2545 kfree_sensitive(new_key_string);
2546
2547 return ret;
2548}
2549
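/*
 * A hex key string encodes two characters per byte; a keyring reference
 * of the form ":<key_size>:..." carries the size explicitly.
 */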
2550static int get_key_size(char **key_string)
2551{
2552 char *colon, dummy;
2553 int ret;
2554
2555 if (*key_string[0] != ':')
2556 return strlen(*key_string) >> 1;
2557
2558 /* look for next ':' in key string */
2559 colon = strpbrk(*key_string + 1, ":");
2560 if (!colon)
2561 return -EINVAL;
2562
2563 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2564 return -EINVAL;
2565
2566 *key_string = colon;
2567
2568 /* remaining key string should be :<logon|user|encrypted|trusted>:<key_desc> */
2569
2570 return ret;
2571}
2572
2573#else
2574
2575static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2576{
2577 return -EINVAL;
2578}
2579
2580static int get_key_size(char **key_string)
2581{
2582 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2583}
2584
2585#endif
2586
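/*
 * Accepts a hex-encoded key, "-" for an empty key, or a ':' prefixed
 * kernel keyring reference.
 */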
2587static int crypt_set_key(struct crypt_config *cc, char *key)
2588{
2589 int r = -EINVAL;
2590 int key_string_len = strlen(key);
2591
2592 /* Hyphen (which gives a key_size of zero) means there is no key. */
2593 if (!cc->key_size && strcmp(key, "-"))
2594 goto out;
2595
2596 /* ':' means the key is in kernel keyring, short-circuit normal key processing */
2597 if (key[0] == ':') {
2598 r = crypt_set_keyring_key(cc, key + 1);
2599 goto out;
2600 }
2601
2602 /* clear the flag since following operations may invalidate previously valid key */
2603 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2604
2605 /* wipe references to any kernel keyring key */
2606 kfree_sensitive(cc->key_string);
2607 cc->key_string = NULL;
2608
2609 /* Decode key from its hex representation. */
2610 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2611 goto out;
2612
2613 r = crypt_setkey(cc);
2614 if (!r)
2615 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2616
2617out:
2618 /* Hex key string not needed after here, so wipe it. */
2619 memset(key, '0', key_string_len);
2620
2621 return r;
2622}
2623
2624static int crypt_wipe_key(struct crypt_config *cc)
2625{
2626 int r;
2627
2628 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2629 get_random_bytes(&cc->key, cc->key_size);
2630
2631 /* Wipe IV private keys */
2632 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2633 r = cc->iv_gen_ops->wipe(cc);
2634 if (r)
2635 return r;
2636 }
2637
2638 kfree_sensitive(cc->key_string);
2639 cc->key_string = NULL;
2640 r = crypt_setkey(cc);
2641 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2642
2643 return r;
2644}
2645
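/*
 * Split the low-memory page budget evenly between all dm-crypt
 * instances, with a fixed per-client minimum.
 */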
2646static void crypt_calculate_pages_per_client(void)
2647{
2648 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2649
2650 if (!dm_crypt_clients_n)
2651 return;
2652
2653 pages /= dm_crypt_clients_n;
2654 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2655 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2656 dm_crypt_pages_per_client = pages;
2657}
2658
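/*
 * Mempool page allocator: fail opportunistic (__GFP_NORETRY) allocations
 * once this instance has used its share of dm_crypt_pages_per_client.
 */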
2659static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2660{
2661 struct crypt_config *cc = pool_data;
2662 struct page *page;
2663
2664 if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2665 likely(gfp_mask & __GFP_NORETRY))
2666 return NULL;
2667
2668 page = alloc_page(gfp_mask);
2669 if (likely(page != NULL))
2670 percpu_counter_add(&cc->n_allocated_pages, 1);
2671
2672 return page;
2673}
2674
2675static void crypt_page_free(void *page, void *pool_data)
2676{
2677 struct crypt_config *cc = pool_data;
2678
2679 __free_page(page);
2680 percpu_counter_sub(&cc->n_allocated_pages, 1);
2681}
2682
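/* Tear down the mapping and wipe all key material. */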
2683static void crypt_dtr(struct dm_target *ti)
2684{
2685 struct crypt_config *cc = ti->private;
2686
2687 ti->private = NULL;
2688
2689 if (!cc)
2690 return;
2691
2692 if (cc->write_thread)
2693 kthread_stop(cc->write_thread);
2694
2695 if (cc->io_queue)
2696 destroy_workqueue(cc->io_queue);
2697 if (cc->crypt_queue)
2698 destroy_workqueue(cc->crypt_queue);
2699
2700 crypt_free_tfms(cc);
2701
2702 bioset_exit(&cc->bs);
2703
2704 mempool_exit(&cc->page_pool);
2705 mempool_exit(&cc->req_pool);
2706 mempool_exit(&cc->tag_pool);
2707
2708 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2709 percpu_counter_destroy(&cc->n_allocated_pages);
2710
2711 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2712 cc->iv_gen_ops->dtr(cc);
2713
2714 if (cc->dev)
2715 dm_put_device(ti, cc->dev);
2716
2717 kfree_sensitive(cc->cipher_string);
2718 kfree_sensitive(cc->key_string);
2719 kfree_sensitive(cc->cipher_auth);
2720 kfree_sensitive(cc->authenc_key);
2721
2722 mutex_destroy(&cc->bio_alloc_lock);
2723
2724 /* Must zero key material before freeing */
2725 kfree_sensitive(cc);
2726
2727 spin_lock(&dm_crypt_clients_lock);
2728 WARN_ON(!dm_crypt_clients_n);
2729 dm_crypt_clients_n--;
2730 crypt_calculate_pages_per_client();
2731 spin_unlock(&dm_crypt_clients_lock);
2732}
2733
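/* Select the IV generation operations for the requested <ivmode>. */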
2734static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2735{
2736 struct crypt_config *cc = ti->private;
2737
2738 if (crypt_integrity_aead(cc))
2739 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2740 else
2741 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2742
2743 if (cc->iv_size)
2744 /* at least a 64 bit sector number should fit in our buffer */
2745 cc->iv_size = max(cc->iv_size,
2746 (unsigned int)(sizeof(u64) / sizeof(u8)));
2747 else if (ivmode) {
2748 DMWARN("Selected cipher does not support IVs");
2749 ivmode = NULL;
2750 }
2751
2752 /* Choose ivmode, see comments at iv code. */
2753 if (ivmode == NULL)
2754 cc->iv_gen_ops = NULL;
2755 else if (strcmp(ivmode, "plain") == 0)
2756 cc->iv_gen_ops = &crypt_iv_plain_ops;
2757 else if (strcmp(ivmode, "plain64") == 0)
2758 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2759 else if (strcmp(ivmode, "plain64be") == 0)
2760 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2761 else if (strcmp(ivmode, "essiv") == 0)
2762 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2763 else if (strcmp(ivmode, "benbi") == 0)
2764 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2765 else if (strcmp(ivmode, "null") == 0)
2766 cc->iv_gen_ops = &crypt_iv_null_ops;
2767 else if (strcmp(ivmode, "eboiv") == 0)
2768 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2769 else if (strcmp(ivmode, "elephant") == 0) {
2770 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2771 cc->key_parts = 2;
2772 cc->key_extra_size = cc->key_size / 2;
2773 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2774 return -EINVAL;
2775 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2776 } else if (strcmp(ivmode, "lmk") == 0) {
2777 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2778 /*
2779  * Versions 2 and 3 are recognised according
2780  * to length of provided multi-key string.
2781  * If present (version 3), last key is used as IV seed.
2782  * All keys (including IV seed) are always the same size.
2783  */
2784 if (cc->key_size % cc->key_parts) {
2785 cc->key_parts++;
2786 cc->key_extra_size = cc->key_size / cc->key_parts;
2787 }
2788 } else if (strcmp(ivmode, "tcw") == 0) {
2789 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2790 cc->key_parts += 2;
2791 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2792 } else if (strcmp(ivmode, "random") == 0) {
2793 cc->iv_gen_ops = &crypt_iv_random_ops;
2794 /* Need storage space in integrity fields. */
2795 cc->integrity_iv_size = cc->iv_size;
2796 } else {
2797 ti->error = "Invalid IV mode";
2798 return -EINVAL;
2799 }
2800
2801 return 0;
2802}
2803
2804 /*
2805  * Workaround to parse HMAC algorithm from AEAD crypto API spec.
2806  * The HMAC is needed to calculate tag size (HMAC digest size).
2807  * This should probably be done by crypto-api calls (once available...)
2808  */
2809static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2810{
2811 char *start, *end, *mac_alg = NULL;
2812 struct crypto_ahash *mac;
2813
2814 if (!strstarts(cipher_api, "authenc("))
2815 return 0;
2816
2817 start = strchr(cipher_api, '(');
2818 end = strchr(cipher_api, ',');
2819 if (!start || !end || ++start > end)
2820 return -EINVAL;
2821
2822 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2823 if (!mac_alg)
2824 return -ENOMEM;
2825 strncpy(mac_alg, start, end - start);
2826
2827 mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
2828 kfree(mac_alg);
2829
2830 if (IS_ERR(mac))
2831 return PTR_ERR(mac);
2832
2833 cc->key_mac_size = crypto_ahash_digestsize(mac);
2834 crypto_free_ahash(mac);
2835
2836 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2837 if (!cc->authenc_key)
2838 return -ENOMEM;
2839
2840 return 0;
2841}
2842
2843static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2844 char **ivmode, char **ivopts)
2845{
2846 struct crypt_config *cc = ti->private;
2847 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2848 int ret = -EINVAL;
2849
2850 cc->tfms_count = 1;
2851
2852 /*
2853  * New format (capi: prefix)
2854  * capi:cipher_api_spec-iv:ivopts
2855  */
2856 tmp = &cipher_in[strlen("capi:")];
2857
2858 /* Separate IV options if present, it can contain another '-' in hash name */
2859 *ivopts = strrchr(tmp, ':');
2860 if (*ivopts) {
2861 **ivopts = '\0';
2862 (*ivopts)++;
2863 }
2864
2865 *ivmode = strrchr(tmp, '-');
2866 if (*ivmode) {
2867 **ivmode = '\0';
2868 (*ivmode)++;
2869 }
2870
2871 cipher_api = tmp;
2872
2873 /* For AEAD modes, determine the HMAC digest size needed for the authenc() key */
2874 if (crypt_integrity_aead(cc)) {
2875 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2876 if (ret < 0) {
2877 ti->error = "Invalid AEAD cipher spec";
2878 return -ENOMEM;
2879 }
2880 }
2881
2882 if (*ivmode && !strcmp(*ivmode, "lmk"))
2883 cc->tfms_count = 64;
2884
2885 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2886 if (!*ivopts) {
2887 ti->error = "Digest algorithm missing for ESSIV mode";
2888 return -EINVAL;
2889 }
2890 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2891 cipher_api, *ivopts);
2892 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2893 ti->error = "Cannot allocate cipher string";
2894 return -ENOMEM;
2895 }
2896 cipher_api = buf;
2897 }
2898
2899 cc->key_parts = cc->tfms_count;
2900
2901 /* Allocate cipher */
2902 ret = crypt_alloc_tfms(cc, cipher_api);
2903 if (ret < 0) {
2904 ti->error = "Error allocating crypto tfm";
2905 return ret;
2906 }
2907
2908 if (crypt_integrity_aead(cc))
2909 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2910 else
2911 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2912
2913 return 0;
2914}
2915
2916static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2917 char **ivmode, char **ivopts)
2918{
2919 struct crypt_config *cc = ti->private;
2920 char *tmp, *cipher, *chainmode, *keycount;
2921 char *cipher_api = NULL;
2922 int ret = -EINVAL;
2923 char dummy;
2924
2925 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2926 ti->error = "Bad cipher specification";
2927 return -EINVAL;
2928 }
2929
2930 /*
2931  * Legacy dm-crypt cipher specification
2932  * cipher[:keycount]-mode-iv:ivopts
2933  */
2934 tmp = cipher_in;
2935 keycount = strsep(&tmp, "-");
2936 cipher = strsep(&keycount, ":");
2937
2938 if (!keycount)
2939 cc->tfms_count = 1;
2940 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2941 !is_power_of_2(cc->tfms_count)) {
2942 ti->error = "Bad cipher key count specification";
2943 return -EINVAL;
2944 }
2945 cc->key_parts = cc->tfms_count;
2946
2947 chainmode = strsep(&tmp, "-");
2948 *ivmode = strsep(&tmp, ":");
2949 *ivopts = tmp;
2950
2951 /*
2952  * For compatibility with the original dm-crypt mapping format, if
2953  * only the cipher name is supplied, use cbc-plain.
2954  */
2955 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2956 chainmode = "cbc";
2957 *ivmode = "plain";
2958 }
2959
2960 if (strcmp(chainmode, "ecb") && !*ivmode) {
2961 ti->error = "IV mechanism required";
2962 return -EINVAL;
2963 }
2964
2965 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2966 if (!cipher_api)
2967 goto bad_mem;
2968
2969 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2970 if (!*ivopts) {
2971 ti->error = "Digest algorithm missing for ESSIV mode";
2972 kfree(cipher_api);
2973 return -EINVAL;
2974 }
2975 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2976 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2977 } else {
2978 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2979 "%s(%s)", chainmode, cipher);
2980 }
2981 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2982 kfree(cipher_api);
2983 goto bad_mem;
2984 }
2985
2986 /* Allocate cipher */
2987 ret = crypt_alloc_tfms(cc, cipher_api);
2988 if (ret < 0) {
2989 ti->error = "Error allocating crypto tfm";
2990 kfree(cipher_api);
2991 return ret;
2992 }
2993 kfree(cipher_api);
2994
2995 return 0;
2996bad_mem:
2997 ti->error = "Cannot allocate cipher strings";
2998 return -ENOMEM;
2999}
3000
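/*
 * Parse the cipher specification (either the "capi:" format or the
 * legacy cipher[:keycount]-mode-iv format), allocate the transforms and
 * set up the IV generator and key.
 */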
3001static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
3002{
3003 struct crypt_config *cc = ti->private;
3004 char *ivmode = NULL, *ivopts = NULL;
3005 int ret;
3006
3007 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3008 if (!cc->cipher_string) {
3009 ti->error = "Cannot allocate cipher strings";
3010 return -ENOMEM;
3011 }
3012
3013 if (strstarts(cipher_in, "capi:"))
3014 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
3015 else
3016 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
3017 if (ret)
3018 return ret;
3019
3020 /* Initialize IV */
3021 ret = crypt_ctr_ivmode(ti, ivmode);
3022 if (ret < 0)
3023 return ret;
3024
3025 /* Initialize and set key */
3026 ret = crypt_set_key(cc, key);
3027 if (ret < 0) {
3028 ti->error = "Error decoding and setting key";
3029 return ret;
3030 }
3031
3032 /* Allocate IV */
3033 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3034 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3035 if (ret < 0) {
3036 ti->error = "Error creating IV";
3037 return ret;
3038 }
3039 }
3040
3041 /* Initialize IV generator private data from the key (lmk/tcw seeds) */
3042 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3043 ret = cc->iv_gen_ops->init(cc);
3044 if (ret < 0) {
3045 ti->error = "Error initialising IV";
3046 return ret;
3047 }
3048 }
3049
3050 /* wipe the kernel key payload copy */
3051 if (cc->key_string)
3052 memset(cc->key, 0, cc->key_size * sizeof(u8));
3053
3054 return ret;
3055}
3056
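/*
 * Parse optional feature arguments: allow_discards, same_cpu_crypt,
 * submit_from_crypt_cpus, no_read_workqueue, no_write_workqueue,
 * integrity:<bytes>:<type>, sector_size:<bytes> and iv_large_sectors.
 */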
3057static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
3058{
3059 struct crypt_config *cc = ti->private;
3060 struct dm_arg_set as;
3061 static const struct dm_arg _args[] = {
3062 {0, 8, "Invalid number of feature args"},
3063 };
3064 unsigned int opt_params, val;
3065 const char *opt_string, *sval;
3066 char dummy;
3067 int ret;
3068
3069 /* Optional parameters */
3070 as.argc = argc;
3071 as.argv = argv;
3072
3073 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
3074 if (ret)
3075 return ret;
3076
3077 while (opt_params--) {
3078 opt_string = dm_shift_arg(&as);
3079 if (!opt_string) {
3080 ti->error = "Not enough feature arguments";
3081 return -EINVAL;
3082 }
3083
3084 if (!strcasecmp(opt_string, "allow_discards"))
3085 ti->num_discard_bios = 1;
3086
3087 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
3088 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3089
3090 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
3091 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3092 else if (!strcasecmp(opt_string, "no_read_workqueue"))
3093 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3094 else if (!strcasecmp(opt_string, "no_write_workqueue"))
3095 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3096 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
3097 if (val == 0 || val > MAX_TAG_SIZE) {
3098 ti->error = "Invalid integrity arguments";
3099 return -EINVAL;
3100 }
3101 cc->on_disk_tag_size = val;
3102 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
3103 if (!strcasecmp(sval, "aead")) {
3104 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3105 } else if (strcasecmp(sval, "none")) {
3106 ti->error = "Unknown integrity profile";
3107 return -EINVAL;
3108 }
3109
3110 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3111 if (!cc->cipher_auth)
3112 return -ENOMEM;
3113 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3114 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3115 cc->sector_size > 4096 ||
3116 (cc->sector_size & (cc->sector_size - 1))) {
3117 ti->error = "Invalid feature value for sector_size";
3118 return -EINVAL;
3119 }
3120 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3121 ti->error = "Device size is not multiple of sector_size feature";
3122 return -EINVAL;
3123 }
3124 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3125 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
3126 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3127 else {
3128 ti->error = "Invalid feature arguments";
3129 return -EINVAL;
3130 }
3131 }
3132
3133 return 0;
3134}
3135
3136#ifdef CONFIG_BLK_DEV_ZONED
3137static int crypt_report_zones(struct dm_target *ti,
3138 struct dm_report_zones_args *args, unsigned int nr_zones)
3139{
3140 struct crypt_config *cc = ti->private;
3141
3142 return dm_report_zones(cc->dev->bdev, cc->start,
3143 cc->start + dm_target_offset(ti, args->next_sector),
3144 args, nr_zones);
3145}
3146#else
3147#define crypt_report_zones NULL
3148#endif
3149
3150 /*
3151  * Construct an encryption mapping:
3152  * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
3153  */
3154static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3155{
3156 struct crypt_config *cc;
3157 const char *devname = dm_table_device_name(ti->table);
3158 int key_size;
3159 unsigned int align_mask;
3160 unsigned long long tmpll;
3161 int ret;
3162 size_t iv_size_padding, additional_req_size;
3163 char dummy;
3164
3165 if (argc < 5) {
3166 ti->error = "Not enough arguments";
3167 return -EINVAL;
3168 }
3169
3170 key_size = get_key_size(&argv[1]);
3171 if (key_size < 0) {
3172 ti->error = "Cannot parse key size";
3173 return -EINVAL;
3174 }
3175
3176 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3177 if (!cc) {
3178 ti->error = "Cannot allocate encryption context";
3179 return -ENOMEM;
3180 }
3181 cc->key_size = key_size;
3182 cc->sector_size = (1 << SECTOR_SHIFT);
3183 cc->sector_shift = 0;
3184
3185 ti->private = cc;
3186
3187 spin_lock(&dm_crypt_clients_lock);
3188 dm_crypt_clients_n++;
3189 crypt_calculate_pages_per_client();
3190 spin_unlock(&dm_crypt_clients_lock);
3191
3192 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3193 if (ret < 0)
3194 goto bad;
3195
3196 /* Optional parameters need to be read before cipher constructor */
3197 if (argc > 5) {
3198 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3199 if (ret)
3200 goto bad;
3201 }
3202
3203 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3204 if (ret < 0)
3205 goto bad;
3206
3207 if (crypt_integrity_aead(cc)) {
3208 cc->dmreq_start = sizeof(struct aead_request);
3209 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3210 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3211 } else {
3212 cc->dmreq_start = sizeof(struct skcipher_request);
3213 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3214 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3215 }
3216 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3217
3218 if (align_mask < CRYPTO_MINALIGN) {
3219 /* Allocate the padding exactly */
3220 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3221 & align_mask;
3222 } else {
3223 /*
3224  * If the cipher requires greater alignment than kmalloc
3225  * alignment, we don't know the exact position of the
3226  * initialization vector. We must assume worst case.
3227  */
3228 iv_size_padding = align_mask;
3229 }
3230
3231 /* ...| IV + padding | original IV | original sec. number | bio tag offset | */
3232 additional_req_size = sizeof(struct dm_crypt_request) +
3233 iv_size_padding + cc->iv_size +
3234 cc->iv_size +
3235 sizeof(uint64_t) +
3236 sizeof(unsigned int);
3237
3238 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3239 if (ret) {
3240 ti->error = "Cannot allocate crypt request mempool";
3241 goto bad;
3242 }
3243
3244 cc->per_bio_data_size = ti->per_io_data_size =
3245 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3246 ARCH_KMALLOC_MINALIGN);
3247
3248 ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3249 if (ret) {
3250 ti->error = "Cannot allocate page mempool";
3251 goto bad;
3252 }
3253
3254 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3255 if (ret) {
3256 ti->error = "Cannot allocate crypt bioset";
3257 goto bad;
3258 }
3259
3260 mutex_init(&cc->bio_alloc_lock);
3261
3262 ret = -EINVAL;
3263 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3264 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3265 ti->error = "Invalid iv_offset sector";
3266 goto bad;
3267 }
3268 cc->iv_offset = tmpll;
3269
3270 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3271 if (ret) {
3272 ti->error = "Device lookup failed";
3273 goto bad;
3274 }
3275
3276 ret = -EINVAL;
3277 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
3278 ti->error = "Invalid device sector";
3279 goto bad;
3280 }
3281 cc->start = tmpll;
3282
3283 if (bdev_is_zoned(cc->dev->bdev)) {
3284 /*
3285  * For zoned block devices, we need to preserve the issuer write
3286  * ordering. To do so, disable write workqueues and force inline
3287  * encryption completion.
3288  */
3289 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3290 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3291
3292 /*
3293  * All zone append writes to a zone of a zoned block device will
3294  * have the same BIO sector, the start of the zone. When the
3295  * cipher IV mode uses sector values, all data targeting a
3296  * zone will be encrypted using the first sector numbers of the
3297  * zone. This will not result in write errors but will
3298  * cause most reads to fail as reads will use the sector values
3299  * for the actual data location, resulting in IV mismatch.
3300  * To avoid this problem, ask DM core to emulate zone append
3301  * operations by processing regular write operations.
3302  */
3303 DMDEBUG("Zone append operations will be emulated");
3304 ti->emulate_zone_append = true;
3305 }
3306
3307 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3308 ret = crypt_integrity_ctr(cc, ti);
3309 if (ret)
3310 goto bad;
3311
3312 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3313 if (!cc->tag_pool_max_sectors)
3314 cc->tag_pool_max_sectors = 1;
3315
3316 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3317 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3318 if (ret) {
3319 ti->error = "Cannot allocate integrity tags mempool";
3320 goto bad;
3321 }
3322
3323 cc->tag_pool_max_sectors <<= cc->sector_shift;
3324 }
3325
3326 ret = -ENOMEM;
3327 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3328 if (!cc->io_queue) {
3329 ti->error = "Couldn't create kcryptd io queue";
3330 goto bad;
3331 }
3332
3333 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3334 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3335 1, devname);
3336 else
3337 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3338 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3339 num_online_cpus(), devname);
3340 if (!cc->crypt_queue) {
3341 ti->error = "Couldn't create kcryptd queue";
3342 goto bad;
3343 }
3344
3345 spin_lock_init(&cc->write_thread_lock);
3346 cc->write_tree = RB_ROOT;
3347
3348 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3349 if (IS_ERR(cc->write_thread)) {
3350 ret = PTR_ERR(cc->write_thread);
3351 cc->write_thread = NULL;
3352 ti->error = "Couldn't spawn write thread";
3353 goto bad;
3354 }
3355 wake_up_process(cc->write_thread);
3356
3357 ti->num_flush_bios = 1;
3358 ti->limit_swap_bios = true;
3359
3360 return 0;
3361
3362bad:
3363 crypt_dtr(ti);
3364 return ret;
3365}
3366
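/*
 * Map an incoming bio: flushes and discards are remapped directly to the
 * underlying device, everything else is queued for encryption/decryption.
 */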
3367static int crypt_map(struct dm_target *ti, struct bio *bio)
3368{
3369 struct dm_crypt_io *io;
3370 struct crypt_config *cc = ti->private;
3371
3372 /*
3373  * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3374  * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
3375  * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
3376  */
3377 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3378 bio_op(bio) == REQ_OP_DISCARD)) {
3379 bio_set_dev(bio, cc->dev->bdev);
3380 if (bio_sectors(bio))
3381 bio->bi_iter.bi_sector = cc->start +
3382 dm_target_offset(ti, bio->bi_iter.bi_sector);
3383 return DM_MAPIO_REMAPPED;
3384 }
3385
3386 /*
3387  * Check if bio is too large, split as needed.
3388  */
3389 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3390 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3391 dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3392
3393 /*
3394  * Ensure that bio is a multiple of internal sector encryption size
3395  * and is aligned to this size as defined in IO hints.
3396  */
3397 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3398 return DM_MAPIO_KILL;
3399
3400 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3401 return DM_MAPIO_KILL;
3402
3403 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3404 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3405
3406 if (cc->on_disk_tag_size) {
3407 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3408
3409 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
3410 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
3411 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
3412 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3413 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3414 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3415 io->integrity_metadata_from_pool = true;
3416 }
3417 }
3418
3419 if (crypt_integrity_aead(cc))
3420 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3421 else
3422 io->ctx.r.req = (struct skcipher_request *)(io + 1);
3423
3424 if (bio_data_dir(io->base_bio) == READ) {
3425 if (kcryptd_io_read(io, GFP_NOWAIT))
3426 kcryptd_queue_read(io);
3427 } else
3428 kcryptd_queue_crypt(io);
3429
3430 return DM_MAPIO_SUBMITTED;
3431}
3432
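/*
 * Report status: the table line contains the cipher, key (or keyring
 * reference), iv_offset, device, start sector and any feature arguments.
 */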
3433static void crypt_status(struct dm_target *ti, status_type_t type,
3434 unsigned status_flags, char *result, unsigned maxlen)
3435{
3436 struct crypt_config *cc = ti->private;
3437 unsigned i, sz = 0;
3438 int num_feature_args = 0;
3439
3440 switch (type) {
3441 case STATUSTYPE_INFO:
3442 result[0] = '\0';
3443 break;
3444
3445 case STATUSTYPE_TABLE:
3446 DMEMIT("%s ", cc->cipher_string);
3447
3448 if (cc->key_size > 0) {
3449 if (cc->key_string)
3450 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3451 else
3452 for (i = 0; i < cc->key_size; i++)
3453 DMEMIT("%02x", cc->key[i]);
3454 } else
3455 DMEMIT("-");
3456
3457 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3458 cc->dev->name, (unsigned long long)cc->start);
3459
3460 num_feature_args += !!ti->num_discard_bios;
3461 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3462 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3463 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3464 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3465 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3466 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3467 if (cc->on_disk_tag_size)
3468 num_feature_args++;
3469 if (num_feature_args) {
3470 DMEMIT(" %d", num_feature_args);
3471 if (ti->num_discard_bios)
3472 DMEMIT(" allow_discards");
3473 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3474 DMEMIT(" same_cpu_crypt");
3475 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3476 DMEMIT(" submit_from_crypt_cpus");
3477 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3478 DMEMIT(" no_read_workqueue");
3479 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3480 DMEMIT(" no_write_workqueue");
3481 if (cc->on_disk_tag_size)
3482 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3483 if (cc->sector_size != (1 << SECTOR_SHIFT))
3484 DMEMIT(" sector_size:%d", cc->sector_size);
3485 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3486 DMEMIT(" iv_large_sectors");
3487 }
3488
3489 break;
3490 }
3491}
3492
3493static void crypt_postsuspend(struct dm_target *ti)
3494{
3495 struct crypt_config *cc = ti->private;
3496
3497 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3498}
3499
3500static int crypt_preresume(struct dm_target *ti)
3501{
3502 struct crypt_config *cc = ti->private;
3503
3504 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3505 DMERR("aborting resume - crypt key is not set.");
3506 return -EAGAIN;
3507 }
3508
3509 return 0;
3510}
3511
3512static void crypt_resume(struct dm_target *ti)
3513{
3514 struct crypt_config *cc = ti->private;
3515
3516 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3517}
3518
3519 /* Message interface
3520  *	key set <key>
3521  *	key wipe
3522  */
3523static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
3524 char *result, unsigned maxlen)
3525{
3526 struct crypt_config *cc = ti->private;
3527 int key_size, ret = -EINVAL;
3528
3529 if (argc < 2)
3530 goto error;
3531
3532 if (!strcasecmp(argv[0], "key")) {
3533 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3534 DMWARN("not suspended during key manipulation.");
3535 return -EINVAL;
3536 }
3537 if (argc == 3 && !strcasecmp(argv[1], "set")) {
3538 /* The key size may not be changed. */
3539 key_size = get_key_size(&argv[2]);
3540 if (key_size < 0 || cc->key_size != key_size) {
3541 memset(argv[2], '0', strlen(argv[2]));
3542 return -EINVAL;
3543 }
3544
3545 ret = crypt_set_key(cc, argv[2]);
3546 if (ret)
3547 return ret;
3548 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3549 ret = cc->iv_gen_ops->init(cc);
3550
3551 if (cc->key_string)
3552 memset(cc->key, 0, cc->key_size * sizeof(u8));
3553 return ret;
3554 }
3555 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
3556 return crypt_wipe_key(cc);
3557 }
3558
3559error:
3560 DMWARN("unrecognised message received.");
3561 return -EINVAL;
3562}
3563
3564static int crypt_iterate_devices(struct dm_target *ti,
3565 iterate_devices_callout_fn fn, void *data)
3566{
3567 struct crypt_config *cc = ti->private;
3568
3569 return fn(ti, cc->dev, cc->start, ti->len, data);
3570}
3571
3572static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3573{
3574 struct crypt_config *cc = ti->private;
3575
3576 /*
3577  * Unfortunate constraint that is required to avoid the potential
3578  * for exceeding underlying device's max_segments limits -- due to
3579  * crypt_alloc_buffer() possibly allocating pages for the encryption
3580  * bio that are not as physically contiguous as the original bio.
3581  */
3582 limits->max_segment_size = PAGE_SIZE;
3583
3584 limits->logical_block_size =
3585 max_t(unsigned, limits->logical_block_size, cc->sector_size);
3586 limits->physical_block_size =
3587 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3588 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
3589}
3590
3591static struct target_type crypt_target = {
3592 .name = "crypt",
3593 .version = {1, 23, 0},
3594 .module = THIS_MODULE,
3595 .ctr = crypt_ctr,
3596 .dtr = crypt_dtr,
3597 .features = DM_TARGET_ZONED_HM,
3598 .report_zones = crypt_report_zones,
3599 .map = crypt_map,
3600 .status = crypt_status,
3601 .postsuspend = crypt_postsuspend,
3602 .preresume = crypt_preresume,
3603 .resume = crypt_resume,
3604 .message = crypt_message,
3605 .iterate_devices = crypt_iterate_devices,
3606 .io_hints = crypt_io_hints,
3607};
3608
3609static int __init dm_crypt_init(void)
3610{
3611 int r;
3612
3613 r = dm_register_target(&crypt_target);
3614 if (r < 0)
3615 DMERR("register failed %d", r);
3616
3617 return r;
3618}
3619
3620static void __exit dm_crypt_exit(void)
3621{
3622 dm_unregister_target(&crypt_target);
3623}
3624
3625module_init(dm_crypt_init);
3626module_exit(dm_crypt_exit);
3627
3628MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3629MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3630MODULE_LICENSE("GPL");
3631