1
2
3
4
5
6
7
8
9
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/key.h>
16#include <linux/bio.h>
17#include <linux/blkdev.h>
18#include <linux/mempool.h>
19#include <linux/slab.h>
20#include <linux/crypto.h>
21#include <linux/workqueue.h>
22#include <linux/kthread.h>
23#include <linux/backing-dev.h>
24#include <linux/atomic.h>
25#include <linux/scatterlist.h>
26#include <linux/rbtree.h>
27#include <linux/ctype.h>
28#include <asm/page.h>
29#include <asm/unaligned.h>
30#include <crypto/hash.h>
31#include <crypto/md5.h>
32#include <crypto/algapi.h>
33#include <crypto/skcipher.h>
34#include <crypto/aead.h>
35#include <crypto/authenc.h>
36#include <linux/rtnetlink.h>
37#include <linux/key-type.h>
38#include <keys/user-type.h>
39#include <keys/encrypted-type.h>
40
41#include <linux/device-mapper.h>
42
43#define DM_MSG_PREFIX "crypt"
44
45
46
47
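/*
 * context holding the current state of a multi-part conversion
 */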
48struct convert_context {
49 struct completion restart;
50 struct bio *bio_in;
51 struct bio *bio_out;
52 struct bvec_iter iter_in;
53 struct bvec_iter iter_out;
54 u64 cc_sector;
55 atomic_t cc_pending;
56 union {
57 struct skcipher_request *req;
58 struct aead_request *req_aead;
59 } r;
60
61};
62
63
64
65
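/*
 * per bio private data
 */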
66struct dm_crypt_io {
67 struct crypt_config *cc;
68 struct bio *base_bio;
69 u8 *integrity_metadata;
70 bool integrity_metadata_from_pool;
71 struct work_struct work;
72 struct tasklet_struct tasklet;
73
74 struct convert_context ctx;
75
76 atomic_t io_pending;
77 blk_status_t error;
78 sector_t sector;
79
80 struct rb_node rb_node;
81} CRYPTO_MINALIGN_ATTR;
82
83struct dm_crypt_request {
84 struct convert_context *ctx;
85 struct scatterlist sg_in[4];
86 struct scatterlist sg_out[4];
87 u64 iv_sector;
88};
89
90struct crypt_config;
91
92struct crypt_iv_operations {
93 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
94 const char *opts);
95 void (*dtr)(struct crypt_config *cc);
96 int (*init)(struct crypt_config *cc);
97 int (*wipe)(struct crypt_config *cc);
98 int (*generator)(struct crypt_config *cc, u8 *iv,
99 struct dm_crypt_request *dmreq);
100 int (*post)(struct crypt_config *cc, u8 *iv,
101 struct dm_crypt_request *dmreq);
102};
103
104struct iv_benbi_private {
105 int shift;
106};
107
108#define LMK_SEED_SIZE 64
109struct iv_lmk_private {
110 struct crypto_shash *hash_tfm;
111 u8 *seed;
112};
113
114#define TCW_WHITENING_SIZE 16
115struct iv_tcw_private {
116 struct crypto_shash *crc32_tfm;
117 u8 *iv_seed;
118 u8 *whitening;
119};
120
121#define ELEPHANT_MAX_KEY_SIZE 32
122struct iv_elephant_private {
123 struct crypto_skcipher *tfm;
124};
125
126
127
128
129
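/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */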
130enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
131 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
132 DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
133 DM_CRYPT_WRITE_INLINE };
134
135enum cipher_flags {
136 CRYPT_MODE_INTEGRITY_AEAD,
137 CRYPT_IV_LARGE_SECTORS,
138 CRYPT_ENCRYPT_PREPROCESS,
139};
140
141
142
143
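/*
 * The fields in here must be read only after initialization.
 */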
144struct crypt_config {
145 struct dm_dev *dev;
146 sector_t start;
147
148 struct percpu_counter n_allocated_pages;
149
150 struct workqueue_struct *io_queue;
151 struct workqueue_struct *crypt_queue;
152
153 spinlock_t write_thread_lock;
154 struct task_struct *write_thread;
155 struct rb_root write_tree;
156
157 char *cipher_string;
158 char *cipher_auth;
159 char *key_string;
160
161 const struct crypt_iv_operations *iv_gen_ops;
162 union {
163 struct iv_benbi_private benbi;
164 struct iv_lmk_private lmk;
165 struct iv_tcw_private tcw;
166 struct iv_elephant_private elephant;
167 } iv_gen_private;
168 u64 iv_offset;
169 unsigned int iv_size;
170 unsigned short int sector_size;
171 unsigned char sector_shift;
172
173 union {
174 struct crypto_skcipher **tfms;
175 struct crypto_aead **tfms_aead;
176 } cipher_tfm;
177 unsigned tfms_count;
178 unsigned long cipher_flags;
179
180
181
182
183
184
185
186
187
188
189
190
191
192
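	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request (or aead_request)
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */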
193 unsigned int dmreq_start;
194
195 unsigned int per_bio_data_size;
196
197 unsigned long flags;
198 unsigned int key_size;
199 unsigned int key_parts;
200 unsigned int key_extra_size;
201 unsigned int key_mac_size;
202
203 unsigned int integrity_tag_size;
204 unsigned int integrity_iv_size;
205 unsigned int on_disk_tag_size;
206
207
208
209
210
211 unsigned tag_pool_max_sectors;
212 mempool_t tag_pool;
213 mempool_t req_pool;
214 mempool_t page_pool;
215
216 struct bio_set bs;
217 struct mutex bio_alloc_lock;
218
219 u8 *authenc_key;
220 u8 key[];
221};
222
223#define MIN_IOS 64
224#define MAX_TAG_SIZE 480
225#define POOL_ENTRY_SIZE 512
226
227static DEFINE_SPINLOCK(dm_crypt_clients_lock);
228static unsigned dm_crypt_clients_n = 0;
229static volatile unsigned long dm_crypt_pages_per_client;
230#define DM_CRYPT_MEMORY_PERCENT 2
231#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
232
233static void clone_init(struct dm_crypt_io *, struct bio *);
234static void kcryptd_queue_crypt(struct dm_crypt_io *io);
235static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
236 struct scatterlist *sg);
237
238static bool crypt_integrity_aead(struct crypt_config *cc);
239
240
241
242
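/*
 * Use this to access cipher attributes that are independent of the key.
 */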
243static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
244{
245 return cc->cipher_tfm.tfms[0];
246}
247
248static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
249{
250 return cc->cipher_tfm.tfms_aead[0];
251}
252
253
308
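/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use it for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system designed by Jari Ruusu.
 *      It operates on full 512 byte sectors and uses CBC with an IV derived
 *      from the sector number, the data and optionally an extra IV seed.
 *      This means that after decryption the first block of the sector must be
 *      tweaked according to the decrypted data.
 *
 * tcw: Compatible implementation of the block chaining mode used by the
 *      TrueCrypt device encryption system (prior to version 4.1).
 *      It operates on full 512 byte sectors and uses CBC with an IV derived
 *      from the initial key and the sector number. In addition, a whitening
 *      value is applied to every sector; the whitening is calculated from the
 *      initial key, the sector number and mixed using CRC32.
 *
 * eboiv: Encrypted byte-offset IV (used in BitLocker in CBC mode).
 *        The IV is the encrypted little-endian byte offset of the sector,
 *        using the same key and cipher as the data.
 *
 * elephant: eboiv extended with the additional Elephant diffuser,
 *           used with BitLocker CBC mode on older Windows systems.
 */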
309static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
310 struct dm_crypt_request *dmreq)
311{
312 memset(iv, 0, cc->iv_size);
313 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
314
315 return 0;
316}
317
318static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
319 struct dm_crypt_request *dmreq)
320{
321 memset(iv, 0, cc->iv_size);
322 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
323
324 return 0;
325}
326
327static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
328 struct dm_crypt_request *dmreq)
329{
330 memset(iv, 0, cc->iv_size);
331
332 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
333
334 return 0;
335}
336
337static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
338 struct dm_crypt_request *dmreq)
339{
340
341
342
343
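	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain sector number here.
	 */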
344 memset(iv, 0, cc->iv_size);
345 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
346
347 return 0;
348}
349
350static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
351 const char *opts)
352{
353 unsigned bs;
354 int log;
355
356 if (crypt_integrity_aead(cc))
357 bs = crypto_aead_blocksize(any_tfm_aead(cc));
358 else
359 bs = crypto_skcipher_blocksize(any_tfm(cc));
360 log = ilog2(bs);
361
362
363
364
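	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen.
	 */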
365 if (1 << log != bs) {
366 ti->error = "cypher blocksize is not a power of 2";
367 return -EINVAL;
368 }
369
370 if (log > 9) {
371 ti->error = "cypher blocksize is > 512";
372 return -EINVAL;
373 }
374
375 cc->iv_gen_private.benbi.shift = 9 - log;
376
377 return 0;
378}
379
380static void crypt_iv_benbi_dtr(struct crypt_config *cc)
381{
382}
383
384static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
385 struct dm_crypt_request *dmreq)
386{
387 __be64 val;
388
389 memset(iv, 0, cc->iv_size - sizeof(u64));
390
391 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
392 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
393
394 return 0;
395}
396
397static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
398 struct dm_crypt_request *dmreq)
399{
400 memset(iv, 0, cc->iv_size);
401
402 return 0;
403}
404
405static void crypt_iv_lmk_dtr(struct crypt_config *cc)
406{
407 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
408
409 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
410 crypto_free_shash(lmk->hash_tfm);
411 lmk->hash_tfm = NULL;
412
413 kfree_sensitive(lmk->seed);
414 lmk->seed = NULL;
415}
416
417static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
418 const char *opts)
419{
420 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
421
422 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
423 ti->error = "Unsupported sector size for LMK";
424 return -EINVAL;
425 }
426
427 lmk->hash_tfm = crypto_alloc_shash("md5", 0,
428 CRYPTO_ALG_ALLOCATES_MEMORY);
429 if (IS_ERR(lmk->hash_tfm)) {
430 ti->error = "Error initializing LMK hash";
431 return PTR_ERR(lmk->hash_tfm);
432 }
433
434
435 if (cc->key_parts == cc->tfms_count) {
436 lmk->seed = NULL;
437 return 0;
438 }
439
440 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
441 if (!lmk->seed) {
442 crypt_iv_lmk_dtr(cc);
443 ti->error = "Error kmallocing seed storage in LMK";
444 return -ENOMEM;
445 }
446
447 return 0;
448}
449
450static int crypt_iv_lmk_init(struct crypt_config *cc)
451{
452 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
453 int subkey_size = cc->key_size / cc->key_parts;
454
455
456 if (lmk->seed)
457 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
458 crypto_shash_digestsize(lmk->hash_tfm));
459
460 return 0;
461}
462
463static int crypt_iv_lmk_wipe(struct crypt_config *cc)
464{
465 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
466
467 if (lmk->seed)
468 memset(lmk->seed, 0, LMK_SEED_SIZE);
469
470 return 0;
471}
472
473static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
474 struct dm_crypt_request *dmreq,
475 u8 *data)
476{
477 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
478 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
479 struct md5_state md5state;
480 __le32 buf[4];
481 int i, r;
482
483 desc->tfm = lmk->hash_tfm;
484
485 r = crypto_shash_init(desc);
486 if (r)
487 return r;
488
489 if (lmk->seed) {
490 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
491 if (r)
492 return r;
493 }
494
495
496 r = crypto_shash_update(desc, data + 16, 16 * 31);
497 if (r)
498 return r;
499
500
501 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
502 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
503 buf[2] = cpu_to_le32(4024);
504 buf[3] = 0;
505 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
506 if (r)
507 return r;
508
509
510 r = crypto_shash_export(desc, &md5state);
511 if (r)
512 return r;
513
514 for (i = 0; i < MD5_HASH_WORDS; i++)
515 __cpu_to_le32s(&md5state.hash[i]);
516 memcpy(iv, &md5state.hash, cc->iv_size);
517
518 return 0;
519}
520
521static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
522 struct dm_crypt_request *dmreq)
523{
524 struct scatterlist *sg;
525 u8 *src;
526 int r = 0;
527
528 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
529 sg = crypt_get_sg_data(cc, dmreq->sg_in);
530 src = kmap_atomic(sg_page(sg));
531 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
532 kunmap_atomic(src);
533 } else
534 memset(iv, 0, cc->iv_size);
535
536 return r;
537}
538
539static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
540 struct dm_crypt_request *dmreq)
541{
542 struct scatterlist *sg;
543 u8 *dst;
544 int r;
545
546 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
547 return 0;
548
549 sg = crypt_get_sg_data(cc, dmreq->sg_out);
550 dst = kmap_atomic(sg_page(sg));
551 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
552
553
554 if (!r)
555 crypto_xor(dst + sg->offset, iv, cc->iv_size);
556
557 kunmap_atomic(dst);
558 return r;
559}
560
561static void crypt_iv_tcw_dtr(struct crypt_config *cc)
562{
563 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
564
565 kfree_sensitive(tcw->iv_seed);
566 tcw->iv_seed = NULL;
567 kfree_sensitive(tcw->whitening);
568 tcw->whitening = NULL;
569
570 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
571 crypto_free_shash(tcw->crc32_tfm);
572 tcw->crc32_tfm = NULL;
573}
574
575static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
576 const char *opts)
577{
578 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
579
580 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
581 ti->error = "Unsupported sector size for TCW";
582 return -EINVAL;
583 }
584
585 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
586 ti->error = "Wrong key size for TCW";
587 return -EINVAL;
588 }
589
590 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
591 CRYPTO_ALG_ALLOCATES_MEMORY);
592 if (IS_ERR(tcw->crc32_tfm)) {
593 ti->error = "Error initializing CRC32 in TCW";
594 return PTR_ERR(tcw->crc32_tfm);
595 }
596
597 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
598 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
599 if (!tcw->iv_seed || !tcw->whitening) {
600 crypt_iv_tcw_dtr(cc);
601 ti->error = "Error allocating seed storage in TCW";
602 return -ENOMEM;
603 }
604
605 return 0;
606}
607
608static int crypt_iv_tcw_init(struct crypt_config *cc)
609{
610 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
611 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
612
613 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
614 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
615 TCW_WHITENING_SIZE);
616
617 return 0;
618}
619
620static int crypt_iv_tcw_wipe(struct crypt_config *cc)
621{
622 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
623
624 memset(tcw->iv_seed, 0, cc->iv_size);
625 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
626
627 return 0;
628}
629
630static int crypt_iv_tcw_whitening(struct crypt_config *cc,
631 struct dm_crypt_request *dmreq,
632 u8 *data)
633{
634 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
635 __le64 sector = cpu_to_le64(dmreq->iv_sector);
636 u8 buf[TCW_WHITENING_SIZE];
637 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
638 int i, r;
639
640
641	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
642	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
643
644
645 desc->tfm = tcw->crc32_tfm;
646 for (i = 0; i < 4; i++) {
647 r = crypto_shash_init(desc);
648 if (r)
649 goto out;
650 r = crypto_shash_update(desc, &buf[i * 4], 4);
651 if (r)
652 goto out;
653 r = crypto_shash_final(desc, &buf[i * 4]);
654 if (r)
655 goto out;
656 }
657 crypto_xor(&buf[0], &buf[12], 4);
658 crypto_xor(&buf[4], &buf[8], 4);
659
660
661 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
662 crypto_xor(data + i * 8, buf, 8);
663out:
664 memzero_explicit(buf, sizeof(buf));
665 return r;
666}
667
668static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
669 struct dm_crypt_request *dmreq)
670{
671 struct scatterlist *sg;
672 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
673 __le64 sector = cpu_to_le64(dmreq->iv_sector);
674 u8 *src;
675 int r = 0;
676
677
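	/* Remove whitening from ciphertext */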
678 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
679 sg = crypt_get_sg_data(cc, dmreq->sg_in);
680 src = kmap_atomic(sg_page(sg));
681 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
682 kunmap_atomic(src);
683 }
684
685
686	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
687	if (cc->iv_size > 8)
688	crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
689 cc->iv_size - 8);
690
691 return r;
692}
693
694static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
695 struct dm_crypt_request *dmreq)
696{
697 struct scatterlist *sg;
698 u8 *dst;
699 int r;
700
701 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
702 return 0;
703
704
705 sg = crypt_get_sg_data(cc, dmreq->sg_out);
706 dst = kmap_atomic(sg_page(sg));
707 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
708 kunmap_atomic(dst);
709
710 return r;
711}
712
713static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
714 struct dm_crypt_request *dmreq)
715{
716
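	/* Used only for writes, there must be an additional space to store IV */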
717 get_random_bytes(iv, cc->iv_size);
718 return 0;
719}
720
721static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
722 const char *opts)
723{
724 if (crypt_integrity_aead(cc)) {
725 ti->error = "AEAD transforms not supported for EBOIV";
726 return -EINVAL;
727 }
728
729 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
730 ti->error = "Block size of EBOIV cipher does "
731 "not match IV size of block cipher";
732 return -EINVAL;
733 }
734
735 return 0;
736}
737
738static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
739 struct dm_crypt_request *dmreq)
740{
741 u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
742 struct skcipher_request *req;
743 struct scatterlist src, dst;
744 DECLARE_CRYPTO_WAIT(wait);
745 int err;
746
747 req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
748 if (!req)
749 return -ENOMEM;
750
751 memset(buf, 0, cc->iv_size);
752 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
753
754 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
755 sg_init_one(&dst, iv, cc->iv_size);
756 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
757 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
758 err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
759 skcipher_request_free(req);
760
761 return err;
762}
763
764static void crypt_iv_elephant_dtr(struct crypt_config *cc)
765{
766 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
767
768 crypto_free_skcipher(elephant->tfm);
769 elephant->tfm = NULL;
770}
771
772static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
773 const char *opts)
774{
775 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
776 int r;
777
778 elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
779 CRYPTO_ALG_ALLOCATES_MEMORY);
780 if (IS_ERR(elephant->tfm)) {
781 r = PTR_ERR(elephant->tfm);
782 elephant->tfm = NULL;
783 return r;
784 }
785
786 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
787 if (r)
788 crypt_iv_elephant_dtr(cc);
789 return r;
790}
791
792static void diffuser_disk_to_cpu(u32 *d, size_t n)
793{
794#ifndef __LITTLE_ENDIAN
795 int i;
796
797 for (i = 0; i < n; i++)
798 d[i] = le32_to_cpu((__le32)d[i]);
799#endif
800}
801
802static void diffuser_cpu_to_disk(__le32 *d, size_t n)
803{
804#ifndef __LITTLE_ENDIAN
805 int i;
806
807 for (i = 0; i < n; i++)
808 d[i] = cpu_to_le32((u32)d[i]);
809#endif
810}
811
812static void diffuser_a_decrypt(u32 *d, size_t n)
813{
814 int i, i1, i2, i3;
815
816 for (i = 0; i < 5; i++) {
817 i1 = 0;
818 i2 = n - 2;
819 i3 = n - 5;
820
821 while (i1 < (n - 1)) {
822 d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
823 i1++; i2++; i3++;
824
825 if (i3 >= n)
826 i3 -= n;
827
828 d[i1] += d[i2] ^ d[i3];
829 i1++; i2++; i3++;
830
831 if (i2 >= n)
832 i2 -= n;
833
834 d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
835 i1++; i2++; i3++;
836
837 d[i1] += d[i2] ^ d[i3];
838 i1++; i2++; i3++;
839 }
840 }
841}
842
843static void diffuser_a_encrypt(u32 *d, size_t n)
844{
845 int i, i1, i2, i3;
846
847 for (i = 0; i < 5; i++) {
848 i1 = n - 1;
849 i2 = n - 2 - 1;
850 i3 = n - 5 - 1;
851
852 while (i1 > 0) {
853 d[i1] -= d[i2] ^ d[i3];
854 i1--; i2--; i3--;
855
856 d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
857 i1--; i2--; i3--;
858
859 if (i2 < 0)
860 i2 += n;
861
862 d[i1] -= d[i2] ^ d[i3];
863 i1--; i2--; i3--;
864
865 if (i3 < 0)
866 i3 += n;
867
868 d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
869 i1--; i2--; i3--;
870 }
871 }
872}
873
874static void diffuser_b_decrypt(u32 *d, size_t n)
875{
876 int i, i1, i2, i3;
877
878 for (i = 0; i < 3; i++) {
879 i1 = 0;
880 i2 = 2;
881 i3 = 5;
882
883 while (i1 < (n - 1)) {
884 d[i1] += d[i2] ^ d[i3];
885 i1++; i2++; i3++;
886
887 d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
888 i1++; i2++; i3++;
889
890 if (i2 >= n)
891 i2 -= n;
892
893 d[i1] += d[i2] ^ d[i3];
894 i1++; i2++; i3++;
895
896 if (i3 >= n)
897 i3 -= n;
898
899 d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
900 i1++; i2++; i3++;
901 }
902 }
903}
904
905static void diffuser_b_encrypt(u32 *d, size_t n)
906{
907 int i, i1, i2, i3;
908
909 for (i = 0; i < 3; i++) {
910 i1 = n - 1;
911 i2 = 2 - 1;
912 i3 = 5 - 1;
913
914 while (i1 > 0) {
915 d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
916 i1--; i2--; i3--;
917
918 if (i3 < 0)
919 i3 += n;
920
921 d[i1] -= d[i2] ^ d[i3];
922 i1--; i2--; i3--;
923
924 if (i2 < 0)
925 i2 += n;
926
927 d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
928 i1--; i2--; i3--;
929
930 d[i1] -= d[i2] ^ d[i3];
931 i1--; i2--; i3--;
932 }
933 }
934}
935
936static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
937{
938 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
939 u8 *es, *ks, *data, *data2, *data_offset;
940 struct skcipher_request *req;
941 struct scatterlist *sg, *sg2, src, dst;
942 DECLARE_CRYPTO_WAIT(wait);
943 int i, r;
944
945 req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
946 es = kzalloc(16, GFP_NOIO);
947 ks = kzalloc(32, GFP_NOIO);
948
949 if (!req || !es || !ks) {
950 r = -ENOMEM;
951 goto out;
952 }
953
954 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
955
956
957 sg_init_one(&src, es, 16);
958 sg_init_one(&dst, ks, 16);
959 skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
960 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
961 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
962 if (r)
963 goto out;
964
965
966 es[15] = 0x80;
967 sg_init_one(&dst, &ks[16], 16);
968 r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
969 if (r)
970 goto out;
971
972 sg = crypt_get_sg_data(cc, dmreq->sg_out);
973 data = kmap_atomic(sg_page(sg));
974 data_offset = data + sg->offset;
975
976
977 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
978 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
979 data2 = kmap_atomic(sg_page(sg2));
980 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
981 kunmap_atomic(data2);
982 }
983
984 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
985 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
986 diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
987 diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
988 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
989 }
990
991 for (i = 0; i < (cc->sector_size / 32); i++)
992 crypto_xor(data_offset + i * 32, ks, 32);
993
994 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
995 diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
996 diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
997 diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
998 diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
999 }
1000
1001 kunmap_atomic(data);
1002out:
1003 kfree_sensitive(ks);
1004 kfree_sensitive(es);
1005 skcipher_request_free(req);
1006 return r;
1007}
1008
1009static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1010 struct dm_crypt_request *dmreq)
1011{
1012 int r;
1013
1014 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
1015 r = crypt_iv_elephant(cc, dmreq);
1016 if (r)
1017 return r;
1018 }
1019
1020 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1021}
1022
1023static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1024 struct dm_crypt_request *dmreq)
1025{
1026 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
1027 return crypt_iv_elephant(cc, dmreq);
1028
1029 return 0;
1030}
1031
1032static int crypt_iv_elephant_init(struct crypt_config *cc)
1033{
1034 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1035 int key_offset = cc->key_size - cc->key_extra_size;
1036
1037 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1038}
1039
1040static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1041{
1042 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1043 u8 key[ELEPHANT_MAX_KEY_SIZE];
1044
1045 memset(key, 0, cc->key_extra_size);
1046 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1047}
1048
1049static const struct crypt_iv_operations crypt_iv_plain_ops = {
1050 .generator = crypt_iv_plain_gen
1051};
1052
1053static const struct crypt_iv_operations crypt_iv_plain64_ops = {
1054 .generator = crypt_iv_plain64_gen
1055};
1056
1057static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
1058 .generator = crypt_iv_plain64be_gen
1059};
1060
1061static const struct crypt_iv_operations crypt_iv_essiv_ops = {
1062 .generator = crypt_iv_essiv_gen
1063};
1064
1065static const struct crypt_iv_operations crypt_iv_benbi_ops = {
1066 .ctr = crypt_iv_benbi_ctr,
1067 .dtr = crypt_iv_benbi_dtr,
1068 .generator = crypt_iv_benbi_gen
1069};
1070
1071static const struct crypt_iv_operations crypt_iv_null_ops = {
1072 .generator = crypt_iv_null_gen
1073};
1074
1075static const struct crypt_iv_operations crypt_iv_lmk_ops = {
1076 .ctr = crypt_iv_lmk_ctr,
1077 .dtr = crypt_iv_lmk_dtr,
1078 .init = crypt_iv_lmk_init,
1079 .wipe = crypt_iv_lmk_wipe,
1080 .generator = crypt_iv_lmk_gen,
1081 .post = crypt_iv_lmk_post
1082};
1083
1084static const struct crypt_iv_operations crypt_iv_tcw_ops = {
1085 .ctr = crypt_iv_tcw_ctr,
1086 .dtr = crypt_iv_tcw_dtr,
1087 .init = crypt_iv_tcw_init,
1088 .wipe = crypt_iv_tcw_wipe,
1089 .generator = crypt_iv_tcw_gen,
1090 .post = crypt_iv_tcw_post
1091};
1092
1093static struct crypt_iv_operations crypt_iv_random_ops = {
1094 .generator = crypt_iv_random_gen
1095};
1096
1097static struct crypt_iv_operations crypt_iv_eboiv_ops = {
1098 .ctr = crypt_iv_eboiv_ctr,
1099 .generator = crypt_iv_eboiv_gen
1100};
1101
1102static struct crypt_iv_operations crypt_iv_elephant_ops = {
1103 .ctr = crypt_iv_elephant_ctr,
1104 .dtr = crypt_iv_elephant_dtr,
1105 .init = crypt_iv_elephant_init,
1106 .wipe = crypt_iv_elephant_wipe,
1107 .generator = crypt_iv_elephant_gen,
1108 .post = crypt_iv_elephant_post
1109};
1110
1111
1112
1113
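/*
 * Integrity extensions
 */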
1114static bool crypt_integrity_aead(struct crypt_config *cc)
1115{
1116 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1117}
1118
1119static bool crypt_integrity_hmac(struct crypt_config *cc)
1120{
1121 return crypt_integrity_aead(cc) && cc->key_mac_size;
1122}
1123
1124
1125static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1126 struct scatterlist *sg)
1127{
1128 if (unlikely(crypt_integrity_aead(cc)))
1129 return &sg[2];
1130
1131 return sg;
1132}
1133
1134static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1135{
1136 struct bio_integrity_payload *bip;
1137 unsigned int tag_len;
1138 int ret;
1139
1140 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1141 return 0;
1142
1143 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
1144 if (IS_ERR(bip))
1145 return PTR_ERR(bip);
1146
1147 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1148
1149 bip->bip_iter.bi_size = tag_len;
1150 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1151
1152 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
1153 tag_len, offset_in_page(io->integrity_metadata));
1154 if (unlikely(ret != tag_len))
1155 return -ENOMEM;
1156
1157 return 0;
1158}
1159
1160static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1161{
1162#ifdef CONFIG_BLK_DEV_INTEGRITY
1163 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1164 struct mapped_device *md = dm_table_get_md(ti->table);
1165
1166
1167 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
1168 ti->error = "Integrity profile not supported.";
1169 return -EINVAL;
1170 }
1171
1172 if (bi->tag_size != cc->on_disk_tag_size ||
1173 bi->tuple_size != cc->on_disk_tag_size) {
1174 ti->error = "Integrity profile tag size mismatch.";
1175 return -EINVAL;
1176 }
1177 if (1 << bi->interval_exp != cc->sector_size) {
1178 ti->error = "Integrity profile sector size mismatch.";
1179 return -EINVAL;
1180 }
1181
1182 if (crypt_integrity_aead(cc)) {
1183 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1184 DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
1185 cc->integrity_tag_size, cc->integrity_iv_size);
1186
1187 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1188 ti->error = "Integrity AEAD auth tag size is not supported.";
1189 return -EINVAL;
1190 }
1191 } else if (cc->integrity_iv_size)
1192 DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
1193 cc->integrity_iv_size);
1194
1195 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1196 ti->error = "Not enough space for integrity tag in the profile.";
1197 return -EINVAL;
1198 }
1199
1200 return 0;
1201#else
1202 ti->error = "Integrity profile not supported.";
1203 return -EINVAL;
1204#endif
1205}
1206
1207static void crypt_convert_init(struct crypt_config *cc,
1208 struct convert_context *ctx,
1209 struct bio *bio_out, struct bio *bio_in,
1210 sector_t sector)
1211{
1212 ctx->bio_in = bio_in;
1213 ctx->bio_out = bio_out;
1214 if (bio_in)
1215 ctx->iter_in = bio_in->bi_iter;
1216 if (bio_out)
1217 ctx->iter_out = bio_out->bi_iter;
1218 ctx->cc_sector = sector + cc->iv_offset;
1219 init_completion(&ctx->restart);
1220}
1221
1222static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1223 void *req)
1224{
1225 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1226}
1227
1228static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1229{
1230 return (void *)((char *)dmreq - cc->dmreq_start);
1231}
1232
1233static u8 *iv_of_dmreq(struct crypt_config *cc,
1234 struct dm_crypt_request *dmreq)
1235{
1236 if (crypt_integrity_aead(cc))
1237 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1238 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1239 else
1240 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1241 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1242}
1243
1244static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1245 struct dm_crypt_request *dmreq)
1246{
1247 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1248}
1249
1250static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1251 struct dm_crypt_request *dmreq)
1252{
1253 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1254 return (__le64 *) ptr;
1255}
1256
1257static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1258 struct dm_crypt_request *dmreq)
1259{
1260 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1261 cc->iv_size + sizeof(uint64_t);
1262 return (unsigned int*)ptr;
1263}
1264
1265static void *tag_from_dmreq(struct crypt_config *cc,
1266 struct dm_crypt_request *dmreq)
1267{
1268 struct convert_context *ctx = dmreq->ctx;
1269 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1270
1271 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1272 cc->on_disk_tag_size];
1273}
1274
1275static void *iv_tag_from_dmreq(struct crypt_config *cc,
1276 struct dm_crypt_request *dmreq)
1277{
1278 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1279}
1280
1281static int crypt_convert_block_aead(struct crypt_config *cc,
1282 struct convert_context *ctx,
1283 struct aead_request *req,
1284 unsigned int tag_offset)
1285{
1286 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1287 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1288 struct dm_crypt_request *dmreq;
1289 u8 *iv, *org_iv, *tag_iv, *tag;
1290 __le64 *sector;
1291 int r = 0;
1292
1293 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1294
1295
1296 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1297 return -EIO;
1298
1299 dmreq = dmreq_of_req(cc, req);
1300 dmreq->iv_sector = ctx->cc_sector;
1301 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1302 dmreq->iv_sector >>= cc->sector_shift;
1303 dmreq->ctx = ctx;
1304
1305 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1306
1307 sector = org_sector_of_dmreq(cc, dmreq);
1308 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1309
1310 iv = iv_of_dmreq(cc, dmreq);
1311 org_iv = org_iv_of_dmreq(cc, dmreq);
1312 tag = tag_from_dmreq(cc, dmreq);
1313 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1314
1315
1316
1317
1318
1319
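	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */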
1320 sg_init_table(dmreq->sg_in, 4);
1321 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1322 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1323 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1324 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1325
1326 sg_init_table(dmreq->sg_out, 4);
1327 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1328 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1329 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1330 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1331
1332 if (cc->iv_gen_ops) {
1333
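		/* For READs use IV stored in integrity metadata */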
1334 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1335 memcpy(org_iv, tag_iv, cc->iv_size);
1336 } else {
1337 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1338 if (r < 0)
1339 return r;
1340
1341 if (cc->integrity_iv_size)
1342 memcpy(tag_iv, org_iv, cc->iv_size);
1343 }
1344
1345 memcpy(iv, org_iv, cc->iv_size);
1346 }
1347
1348 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1349 if (bio_data_dir(ctx->bio_in) == WRITE) {
1350 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1351 cc->sector_size, iv);
1352 r = crypto_aead_encrypt(req);
1353 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1354 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1355 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1356 } else {
1357 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1358 cc->sector_size + cc->integrity_tag_size, iv);
1359 r = crypto_aead_decrypt(req);
1360 }
1361
1362 if (r == -EBADMSG) {
1363 char b[BDEVNAME_SIZE];
1364 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
1365 (unsigned long long)le64_to_cpu(*sector));
1366 }
1367
1368 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1369 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1370
1371 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1372 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1373
1374 return r;
1375}
1376
1377static int crypt_convert_block_skcipher(struct crypt_config *cc,
1378 struct convert_context *ctx,
1379 struct skcipher_request *req,
1380 unsigned int tag_offset)
1381{
1382 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1383 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1384 struct scatterlist *sg_in, *sg_out;
1385 struct dm_crypt_request *dmreq;
1386 u8 *iv, *org_iv, *tag_iv;
1387 __le64 *sector;
1388 int r = 0;
1389
1390
1391 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1392 return -EIO;
1393
1394 dmreq = dmreq_of_req(cc, req);
1395 dmreq->iv_sector = ctx->cc_sector;
1396 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1397 dmreq->iv_sector >>= cc->sector_shift;
1398 dmreq->ctx = ctx;
1399
1400 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1401
1402 iv = iv_of_dmreq(cc, dmreq);
1403 org_iv = org_iv_of_dmreq(cc, dmreq);
1404 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1405
1406 sector = org_sector_of_dmreq(cc, dmreq);
1407 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1408
1409
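	/* For skcipher we use only the first sg item */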
1410 sg_in = &dmreq->sg_in[0];
1411 sg_out = &dmreq->sg_out[0];
1412
1413 sg_init_table(sg_in, 1);
1414 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1415
1416 sg_init_table(sg_out, 1);
1417 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1418
1419 if (cc->iv_gen_ops) {
1420
1421 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1422 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1423 } else {
1424 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1425 if (r < 0)
1426 return r;
1427
1428 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1429 sg_in = sg_out;
1430
1431 if (cc->integrity_iv_size)
1432 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1433 }
1434
1435 memcpy(iv, org_iv, cc->iv_size);
1436 }
1437
1438 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1439
1440 if (bio_data_dir(ctx->bio_in) == WRITE)
1441 r = crypto_skcipher_encrypt(req);
1442 else
1443 r = crypto_skcipher_decrypt(req);
1444
1445 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1446 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1447
1448 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1449 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1450
1451 return r;
1452}
1453
1454static void kcryptd_async_done(struct crypto_async_request *async_req,
1455 int error);
1456
1457static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1458 struct convert_context *ctx)
1459{
1460 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1461
1462 if (!ctx->r.req)
1463 ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
1464
1465 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1466
1467
1468
1469
1470
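	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */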
1471 skcipher_request_set_callback(ctx->r.req,
1472 CRYPTO_TFM_REQ_MAY_BACKLOG,
1473 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1474}
1475
1476static void crypt_alloc_req_aead(struct crypt_config *cc,
1477 struct convert_context *ctx)
1478{
1479 if (!ctx->r.req_aead)
1480 ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
1481
1482 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1483
1484
1485
1486
1487
1488 aead_request_set_callback(ctx->r.req_aead,
1489 CRYPTO_TFM_REQ_MAY_BACKLOG,
1490 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1491}
1492
1493static void crypt_alloc_req(struct crypt_config *cc,
1494 struct convert_context *ctx)
1495{
1496 if (crypt_integrity_aead(cc))
1497 crypt_alloc_req_aead(cc, ctx);
1498 else
1499 crypt_alloc_req_skcipher(cc, ctx);
1500}
1501
1502static void crypt_free_req_skcipher(struct crypt_config *cc,
1503 struct skcipher_request *req, struct bio *base_bio)
1504{
1505 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1506
1507 if ((struct skcipher_request *)(io + 1) != req)
1508 mempool_free(req, &cc->req_pool);
1509}
1510
1511static void crypt_free_req_aead(struct crypt_config *cc,
1512 struct aead_request *req, struct bio *base_bio)
1513{
1514 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1515
1516 if ((struct aead_request *)(io + 1) != req)
1517 mempool_free(req, &cc->req_pool);
1518}
1519
1520static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1521{
1522 if (crypt_integrity_aead(cc))
1523 crypt_free_req_aead(cc, req, base_bio);
1524 else
1525 crypt_free_req_skcipher(cc, req, base_bio);
1526}
1527
1528
1529
1530
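/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */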
1531static blk_status_t crypt_convert(struct crypt_config *cc,
1532 struct convert_context *ctx, bool atomic)
1533{
1534 unsigned int tag_offset = 0;
1535 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1536 int r;
1537
1538 atomic_set(&ctx->cc_pending, 1);
1539
1540 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1541
1542 crypt_alloc_req(cc, ctx);
1543 atomic_inc(&ctx->cc_pending);
1544
1545 if (crypt_integrity_aead(cc))
1546 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1547 else
1548 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1549
1550 switch (r) {
1551
1552
1553
1554
1555 case -EBUSY:
1556 wait_for_completion(&ctx->restart);
1557 reinit_completion(&ctx->restart);
1558 fallthrough;
1559
1560
1561
1562
1563 case -EINPROGRESS:
1564 ctx->r.req = NULL;
1565 ctx->cc_sector += sector_step;
1566 tag_offset++;
1567 continue;
1568
1569
1570
1571 case 0:
1572 atomic_dec(&ctx->cc_pending);
1573 ctx->cc_sector += sector_step;
1574 tag_offset++;
1575 if (!atomic)
1576 cond_resched();
1577 continue;
1578
1579
1580
1581 case -EBADMSG:
1582 atomic_dec(&ctx->cc_pending);
1583 return BLK_STS_PROTECTION;
1584
1585
1586
1587 default:
1588 atomic_dec(&ctx->cc_pending);
1589 return BLK_STS_IOERR;
1590 }
1591 }
1592
1593 return 0;
1594}
1595
1596static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1597
1614
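/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256, pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */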
1615static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1616{
1617 struct crypt_config *cc = io->cc;
1618 struct bio *clone;
1619 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1620 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1621 unsigned i, len, remaining_size;
1622 struct page *page;
1623
1624retry:
1625 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1626 mutex_lock(&cc->bio_alloc_lock);
1627
1628 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
1629 if (!clone)
1630 goto out;
1631
1632 clone_init(io, clone);
1633
1634 remaining_size = size;
1635
1636 for (i = 0; i < nr_iovecs; i++) {
1637 page = mempool_alloc(&cc->page_pool, gfp_mask);
1638 if (!page) {
1639 crypt_free_buffer_pages(cc, clone);
1640 bio_put(clone);
1641 gfp_mask |= __GFP_DIRECT_RECLAIM;
1642 goto retry;
1643 }
1644
1645 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1646
1647 bio_add_page(clone, page, len, 0);
1648
1649 remaining_size -= len;
1650 }
1651
1652
1653 if (dm_crypt_integrity_io_alloc(io, clone)) {
1654 crypt_free_buffer_pages(cc, clone);
1655 bio_put(clone);
1656 clone = NULL;
1657 }
1658out:
1659 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1660 mutex_unlock(&cc->bio_alloc_lock);
1661
1662 return clone;
1663}
1664
1665static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1666{
1667 struct bio_vec *bv;
1668 struct bvec_iter_all iter_all;
1669
1670 bio_for_each_segment_all(bv, clone, iter_all) {
1671 BUG_ON(!bv->bv_page);
1672 mempool_free(bv->bv_page, &cc->page_pool);
1673 }
1674}
1675
1676static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1677 struct bio *bio, sector_t sector)
1678{
1679 io->cc = cc;
1680 io->base_bio = bio;
1681 io->sector = sector;
1682 io->error = 0;
1683 io->ctx.r.req = NULL;
1684 io->integrity_metadata = NULL;
1685 io->integrity_metadata_from_pool = false;
1686 atomic_set(&io->io_pending, 0);
1687}
1688
1689static void crypt_inc_pending(struct dm_crypt_io *io)
1690{
1691 atomic_inc(&io->io_pending);
1692}
1693
1694
1695
1696
1697
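/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */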
1698static void crypt_dec_pending(struct dm_crypt_io *io)
1699{
1700 struct crypt_config *cc = io->cc;
1701 struct bio *base_bio = io->base_bio;
1702 blk_status_t error = io->error;
1703
1704 if (!atomic_dec_and_test(&io->io_pending))
1705 return;
1706
1707 if (io->ctx.r.req)
1708 crypt_free_req(cc, io->ctx.r.req, base_bio);
1709
1710 if (unlikely(io->integrity_metadata_from_pool))
1711 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1712 else
1713 kfree(io->integrity_metadata);
1714
1715 base_bio->bi_status = error;
1716 bio_endio(base_bio);
1717}
1718
1735
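/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */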
1736static void crypt_endio(struct bio *clone)
1737{
1738 struct dm_crypt_io *io = clone->bi_private;
1739 struct crypt_config *cc = io->cc;
1740 unsigned rw = bio_data_dir(clone);
1741 blk_status_t error;
1742
1743
1744
1745
1746 if (rw == WRITE)
1747 crypt_free_buffer_pages(cc, clone);
1748
1749 error = clone->bi_status;
1750 bio_put(clone);
1751
1752 if (rw == READ && !error) {
1753 kcryptd_queue_crypt(io);
1754 return;
1755 }
1756
1757 if (unlikely(error))
1758 io->error = error;
1759
1760 crypt_dec_pending(io);
1761}
1762
1763static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1764{
1765 struct crypt_config *cc = io->cc;
1766
1767 clone->bi_private = io;
1768 clone->bi_end_io = crypt_endio;
1769 bio_set_dev(clone, cc->dev->bdev);
1770 clone->bi_opf = io->base_bio->bi_opf;
1771}
1772
1773static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1774{
1775 struct crypt_config *cc = io->cc;
1776 struct bio *clone;
1777
1778
1779
1780
1781
1782
1783
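	/*
	 * We need the original biovec array in order to decrypt the whole bio
	 * data *afterwards* -- thanks to immutable biovecs we don't need to
	 * worry about the block layer modifying the biovec array; so leverage
	 * bio_clone_fast().
	 */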
1784 clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
1785 if (!clone)
1786 return 1;
1787
1788 crypt_inc_pending(io);
1789
1790 clone_init(io, clone);
1791 clone->bi_iter.bi_sector = cc->start + io->sector;
1792
1793 if (dm_crypt_integrity_io_alloc(io, clone)) {
1794 crypt_dec_pending(io);
1795 bio_put(clone);
1796 return 1;
1797 }
1798
1799 submit_bio_noacct(clone);
1800 return 0;
1801}
1802
1803static void kcryptd_io_read_work(struct work_struct *work)
1804{
1805 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1806
1807 crypt_inc_pending(io);
1808 if (kcryptd_io_read(io, GFP_NOIO))
1809 io->error = BLK_STS_RESOURCE;
1810 crypt_dec_pending(io);
1811}
1812
1813static void kcryptd_queue_read(struct dm_crypt_io *io)
1814{
1815 struct crypt_config *cc = io->cc;
1816
1817 INIT_WORK(&io->work, kcryptd_io_read_work);
1818 queue_work(cc->io_queue, &io->work);
1819}
1820
1821static void kcryptd_io_write(struct dm_crypt_io *io)
1822{
1823 struct bio *clone = io->ctx.bio_out;
1824
1825 submit_bio_noacct(clone);
1826}
1827
1828#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1829
1830static int dmcrypt_write(void *data)
1831{
1832 struct crypt_config *cc = data;
1833 struct dm_crypt_io *io;
1834
1835 while (1) {
1836 struct rb_root write_tree;
1837 struct blk_plug plug;
1838
1839 spin_lock_irq(&cc->write_thread_lock);
1840continue_locked:
1841
1842 if (!RB_EMPTY_ROOT(&cc->write_tree))
1843 goto pop_from_list;
1844
1845 set_current_state(TASK_INTERRUPTIBLE);
1846
1847 spin_unlock_irq(&cc->write_thread_lock);
1848
1849 if (unlikely(kthread_should_stop())) {
1850 set_current_state(TASK_RUNNING);
1851 break;
1852 }
1853
1854 schedule();
1855
1856 set_current_state(TASK_RUNNING);
1857 spin_lock_irq(&cc->write_thread_lock);
1858 goto continue_locked;
1859
1860pop_from_list:
1861 write_tree = cc->write_tree;
1862 cc->write_tree = RB_ROOT;
1863 spin_unlock_irq(&cc->write_thread_lock);
1864
1865 BUG_ON(rb_parent(write_tree.rb_node));
1866
1867
1868
1869
1870
1871 blk_start_plug(&plug);
1872 do {
1873 io = crypt_io_from_node(rb_first(&write_tree));
1874 rb_erase(&io->rb_node, &write_tree);
1875 kcryptd_io_write(io);
1876 } while (!RB_EMPTY_ROOT(&write_tree));
1877 blk_finish_plug(&plug);
1878 }
1879 return 0;
1880}
1881
1882static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1883{
1884 struct bio *clone = io->ctx.bio_out;
1885 struct crypt_config *cc = io->cc;
1886 unsigned long flags;
1887 sector_t sector;
1888 struct rb_node **rbp, *parent;
1889
1890 if (unlikely(io->error)) {
1891 crypt_free_buffer_pages(cc, clone);
1892 bio_put(clone);
1893 crypt_dec_pending(io);
1894 return;
1895 }
1896
1897
1898 BUG_ON(io->ctx.iter_out.bi_size);
1899
1900 clone->bi_iter.bi_sector = cc->start + io->sector;
1901
1902 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
1903 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
1904 submit_bio_noacct(clone);
1905 return;
1906 }
1907
1908 spin_lock_irqsave(&cc->write_thread_lock, flags);
1909 if (RB_EMPTY_ROOT(&cc->write_tree))
1910 wake_up_process(cc->write_thread);
1911 rbp = &cc->write_tree.rb_node;
1912 parent = NULL;
1913 sector = io->sector;
1914 while (*rbp) {
1915 parent = *rbp;
1916 if (sector < crypt_io_from_node(parent)->sector)
1917 rbp = &(*rbp)->rb_left;
1918 else
1919 rbp = &(*rbp)->rb_right;
1920 }
1921 rb_link_node(&io->rb_node, parent, rbp);
1922 rb_insert_color(&io->rb_node, &cc->write_tree);
1923 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
1924}
1925
1926static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
1927 struct convert_context *ctx)
1928
1929{
1930 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
1931 return false;
1932
1933
1934
1935
1936
1937
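	/*
	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
	 * constraints so they do not need to be issued inline by
	 * kcryptd_crypt_write_convert().
	 */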
1938 switch (bio_op(ctx->bio_in)) {
1939 case REQ_OP_WRITE:
1940 case REQ_OP_WRITE_SAME:
1941 case REQ_OP_WRITE_ZEROES:
1942 return true;
1943 default:
1944 return false;
1945 }
1946}
1947
1948static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1949{
1950 struct crypt_config *cc = io->cc;
1951 struct convert_context *ctx = &io->ctx;
1952 struct bio *clone;
1953 int crypt_finished;
1954 sector_t sector = io->sector;
1955 blk_status_t r;
1956
1957
1958
1959
1960 crypt_inc_pending(io);
1961 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
1962
1963 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1964 if (unlikely(!clone)) {
1965 io->error = BLK_STS_IOERR;
1966 goto dec;
1967 }
1968
1969 io->ctx.bio_out = clone;
1970 io->ctx.iter_out = clone->bi_iter;
1971
1972 sector += bio_sectors(clone);
1973
1974 crypt_inc_pending(io);
1975 r = crypt_convert(cc, ctx,
1976 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
1977 if (r)
1978 io->error = r;
1979 crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
1980 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
1981
1982 wait_for_completion(&ctx->restart);
1983 crypt_finished = 1;
1984 }
1985
1986
1987 if (crypt_finished) {
1988 kcryptd_crypt_write_io_submit(io, 0);
1989 io->sector = sector;
1990 }
1991
1992dec:
1993 crypt_dec_pending(io);
1994}
1995
1996static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1997{
1998 crypt_dec_pending(io);
1999}
2000
2001static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
2002{
2003 struct crypt_config *cc = io->cc;
2004 blk_status_t r;
2005
2006 crypt_inc_pending(io);
2007
2008 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2009 io->sector);
2010
2011 r = crypt_convert(cc, &io->ctx,
2012 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
2013 if (r)
2014 io->error = r;
2015
2016 if (atomic_dec_and_test(&io->ctx.cc_pending))
2017 kcryptd_crypt_read_done(io);
2018
2019 crypt_dec_pending(io);
2020}
2021
2022static void kcryptd_async_done(struct crypto_async_request *async_req,
2023 int error)
2024{
2025 struct dm_crypt_request *dmreq = async_req->data;
2026 struct convert_context *ctx = dmreq->ctx;
2027 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2028 struct crypt_config *cc = io->cc;
2029
2030
2031
2032
2033
2034
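	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */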
2035 if (error == -EINPROGRESS) {
2036 complete(&ctx->restart);
2037 return;
2038 }
2039
2040 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2041 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2042
2043 if (error == -EBADMSG) {
2044 char b[BDEVNAME_SIZE];
2045 DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
2046 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
2047 io->error = BLK_STS_PROTECTION;
2048 } else if (error < 0)
2049 io->error = BLK_STS_IOERR;
2050
2051 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2052
2053 if (!atomic_dec_and_test(&ctx->cc_pending))
2054 return;
2055
2056
2057
2058
2059
2060 if (bio_data_dir(io->base_bio) == READ) {
2061 kcryptd_crypt_read_done(io);
2062 return;
2063 }
2064
2065 if (kcryptd_crypt_write_inline(cc, ctx)) {
2066 complete(&ctx->restart);
2067 return;
2068 }
2069
2070 kcryptd_crypt_write_io_submit(io, 1);
2071}
2072
2073static void kcryptd_crypt(struct work_struct *work)
2074{
2075 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2076
2077 if (bio_data_dir(io->base_bio) == READ)
2078 kcryptd_crypt_read_convert(io);
2079 else
2080 kcryptd_crypt_write_convert(io);
2081}
2082
2083static void kcryptd_crypt_tasklet(unsigned long work)
2084{
2085 kcryptd_crypt((struct work_struct *)work);
2086}
2087
2088static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2089{
2090 struct crypt_config *cc = io->cc;
2091
2092 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2093 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2094 if (in_irq()) {
2095
2096 tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
2097 tasklet_schedule(&io->tasklet);
2098 return;
2099 }
2100
2101 kcryptd_crypt(&io->work);
2102 return;
2103 }
2104
2105 INIT_WORK(&io->work, kcryptd_crypt);
2106 queue_work(cc->crypt_queue, &io->work);
2107}
2108
2109static void crypt_free_tfms_aead(struct crypt_config *cc)
2110{
2111 if (!cc->cipher_tfm.tfms_aead)
2112 return;
2113
2114 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2115 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2116 cc->cipher_tfm.tfms_aead[0] = NULL;
2117 }
2118
2119 kfree(cc->cipher_tfm.tfms_aead);
2120 cc->cipher_tfm.tfms_aead = NULL;
2121}
2122
2123static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2124{
2125 unsigned i;
2126
2127 if (!cc->cipher_tfm.tfms)
2128 return;
2129
2130 for (i = 0; i < cc->tfms_count; i++)
2131 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2132 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2133 cc->cipher_tfm.tfms[i] = NULL;
2134 }
2135
2136 kfree(cc->cipher_tfm.tfms);
2137 cc->cipher_tfm.tfms = NULL;
2138}
2139
2140static void crypt_free_tfms(struct crypt_config *cc)
2141{
2142 if (crypt_integrity_aead(cc))
2143 crypt_free_tfms_aead(cc);
2144 else
2145 crypt_free_tfms_skcipher(cc);
2146}
2147
2148static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2149{
2150 unsigned i;
2151 int err;
2152
2153 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2154 sizeof(struct crypto_skcipher *),
2155 GFP_KERNEL);
2156 if (!cc->cipher_tfm.tfms)
2157 return -ENOMEM;
2158
2159 for (i = 0; i < cc->tfms_count; i++) {
2160 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2161 CRYPTO_ALG_ALLOCATES_MEMORY);
2162 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2163 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2164 crypt_free_tfms(cc);
2165 return err;
2166 }
2167 }
2168
2169
2170
2171
2172
2173
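	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used.  Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */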
2174 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2175 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2176 return 0;
2177}
2178
2179static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2180{
2181 int err;
2182
2183 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2184 if (!cc->cipher_tfm.tfms)
2185 return -ENOMEM;
2186
2187 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2188 CRYPTO_ALG_ALLOCATES_MEMORY);
2189 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2190 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2191 crypt_free_tfms(cc);
2192 return err;
2193 }
2194
2195 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2196 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2197 return 0;
2198}
2199
2200static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2201{
2202 if (crypt_integrity_aead(cc))
2203 return crypt_alloc_tfms_aead(cc, ciphermode);
2204 else
2205 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2206}
2207
2208static unsigned crypt_subkey_size(struct crypt_config *cc)
2209{
2210 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2211}
2212
2213static unsigned crypt_authenckey_size(struct crypt_config *cc)
2214{
2215 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2216}
2217
2218
2219
2220
2221
2222
2223static void crypt_copy_authenckey(char *p, const void *key,
2224 unsigned enckeylen, unsigned authkeylen)
2225{
2226 struct crypto_authenc_key_param *param;
2227 struct rtattr *rta;
2228
2229 rta = (struct rtattr *)p;
2230 param = RTA_DATA(rta);
2231 param->enckeylen = cpu_to_be32(enckeylen);
2232 rta->rta_len = RTA_LENGTH(sizeof(*param));
2233 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2234 p += RTA_SPACE(sizeof(*param));
2235 memcpy(p, key + enckeylen, authkeylen);
2236 p += authkeylen;
2237 memcpy(p, key, enckeylen);
2238}
2239
2240static int crypt_setkey(struct crypt_config *cc)
2241{
2242 unsigned subkey_size;
2243 int err = 0, i, r;
2244
2245
2246 subkey_size = crypt_subkey_size(cc);
2247
2248 if (crypt_integrity_hmac(cc)) {
2249 if (subkey_size < cc->key_mac_size)
2250 return -EINVAL;
2251
2252 crypt_copy_authenckey(cc->authenc_key, cc->key,
2253 subkey_size - cc->key_mac_size,
2254 cc->key_mac_size);
2255 }
2256
2257 for (i = 0; i < cc->tfms_count; i++) {
2258 if (crypt_integrity_hmac(cc))
2259 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2260 cc->authenc_key, crypt_authenckey_size(cc));
2261 else if (crypt_integrity_aead(cc))
2262 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2263 cc->key + (i * subkey_size),
2264 subkey_size);
2265 else
2266 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2267 cc->key + (i * subkey_size),
2268 subkey_size);
2269 if (r)
2270 err = r;
2271 }
2272
2273 if (crypt_integrity_hmac(cc))
2274 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2275
2276 return err;
2277}
2278
2279#ifdef CONFIG_KEYS
2280
2281static bool contains_whitespace(const char *str)
2282{
2283 while (*str)
2284 if (isspace(*str++))
2285 return true;
2286 return false;
2287}
2288
2289static int set_key_user(struct crypt_config *cc, struct key *key)
2290{
2291 const struct user_key_payload *ukp;
2292
2293 ukp = user_key_payload_locked(key);
2294 if (!ukp)
2295 return -EKEYREVOKED;
2296
2297 if (cc->key_size != ukp->datalen)
2298 return -EINVAL;
2299
2300 memcpy(cc->key, ukp->data, cc->key_size);
2301
2302 return 0;
2303}
2304
2305#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2306static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2307{
2308 const struct encrypted_key_payload *ekp;
2309
2310 ekp = key->payload.data[0];
2311 if (!ekp)
2312 return -EKEYREVOKED;
2313
2314 if (cc->key_size != ekp->decrypted_datalen)
2315 return -EINVAL;
2316
2317 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2318
2319 return 0;
2320}
2321#endif
2322
2323static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2324{
2325 char *new_key_string, *key_desc;
2326 int ret;
2327 struct key_type *type;
2328 struct key *key;
2329 int (*set_key)(struct crypt_config *cc, struct key *key);
2330
	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
2335 if (contains_whitespace(key_string)) {
2336 DMERR("whitespace chars not allowed in key string");
2337 return -EINVAL;
2338 }
2339
	/* look for next ':' separating key_type from key_description */
2341 key_desc = strpbrk(key_string, ":");
2342 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2343 return -EINVAL;
2344
2345 if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2346 type = &key_type_logon;
2347 set_key = set_key_user;
2348 } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2349 type = &key_type_user;
2350 set_key = set_key_user;
2351#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
2352 } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2353 type = &key_type_encrypted;
2354 set_key = set_key_encrypted;
2355#endif
2356 } else {
2357 return -EINVAL;
2358 }
2359
2360 new_key_string = kstrdup(key_string, GFP_KERNEL);
2361 if (!new_key_string)
2362 return -ENOMEM;
2363
2364 key = request_key(type, key_desc + 1, NULL);
2365 if (IS_ERR(key)) {
2366 kfree_sensitive(new_key_string);
2367 return PTR_ERR(key);
2368 }
2369
2370 down_read(&key->sem);
2371
2372 ret = set_key(cc, key);
2373 if (ret < 0) {
2374 up_read(&key->sem);
2375 key_put(key);
2376 kfree_sensitive(new_key_string);
2377 return ret;
2378 }
2379
2380 up_read(&key->sem);
2381 key_put(key);
2382
	/* clear the flag since following operations may invalidate previously valid key */
2384 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2385
2386 ret = crypt_setkey(cc);
2387
2388 if (!ret) {
2389 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2390 kfree_sensitive(cc->key_string);
2391 cc->key_string = new_key_string;
2392 } else
2393 kfree_sensitive(new_key_string);
2394
2395 return ret;
2396}
2397
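/*
 * Parse the key size. A plain hex key encodes its size implicitly (two hex
 * characters per byte); a keyring reference starts with ':' and carries an
 * explicit ":<key_size>:" prefix, which is consumed here so that
 * crypt_set_key() later sees ":<key_type>:<key_description>".
 */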
2398static int get_key_size(char **key_string)
2399{
2400 char *colon, dummy;
2401 int ret;
2402
2403 if (*key_string[0] != ':')
2404 return strlen(*key_string) >> 1;
2405
	/* look for next ':' in key string */
2407 colon = strpbrk(*key_string + 1, ":");
2408 if (!colon)
2409 return -EINVAL;
2410
2411 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2412 return -EINVAL;
2413
2414 *key_string = colon;
2415
	/* remaining key string should be :<logon|user|encrypted>:<key_description> */
2417
2418 return ret;
2419}
2420
2421#else
2422
2423static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2424{
2425 return -EINVAL;
2426}
2427
2428static int get_key_size(char **key_string)
2429{
2430 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2431}
2432
2433#endif
2434
2435static int crypt_set_key(struct crypt_config *cc, char *key)
2436{
2437 int r = -EINVAL;
2438 int key_string_len = strlen(key);
2439
	/* Hyphen (which gives a key_size of zero) means there is no key. */
2441 if (!cc->key_size && strcmp(key, "-"))
2442 goto out;
2443
	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
2445 if (key[0] == ':') {
2446 r = crypt_set_keyring_key(cc, key + 1);
2447 goto out;
2448 }
2449
	/* clear the flag since following operations may invalidate previously valid key */
2451 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2452
	/* wipe references to any kernel keyring key */
2454 kfree_sensitive(cc->key_string);
2455 cc->key_string = NULL;
2456
	/* Decode the key from its hex representation. */
2458 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2459 goto out;
2460
2461 r = crypt_setkey(cc);
2462 if (!r)
2463 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2464
2465out:
	/* The hex key string is not needed after here, so wipe it. */
2467 memset(key, '0', key_string_len);
2468
2469 return r;
2470}
2471
2472static int crypt_wipe_key(struct crypt_config *cc)
2473{
2474 int r;
2475
2476 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2477 get_random_bytes(&cc->key, cc->key_size);
2478
	/* Wipe IV private keys */
2480 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2481 r = cc->iv_gen_ops->wipe(cc);
2482 if (r)
2483 return r;
2484 }
2485
2486 kfree_sensitive(cc->key_string);
2487 cc->key_string = NULL;
2488 r = crypt_setkey(cc);
2489 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2490
2491 return r;
2492}
2493
2494static void crypt_calculate_pages_per_client(void)
2495{
2496 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2497
2498 if (!dm_crypt_clients_n)
2499 return;
2500
2501 pages /= dm_crypt_clients_n;
2502 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2503 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2504 dm_crypt_pages_per_client = pages;
2505}
2506
2507static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2508{
2509 struct crypt_config *cc = pool_data;
2510 struct page *page;
2511
2512 if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2513 likely(gfp_mask & __GFP_NORETRY))
2514 return NULL;
2515
2516 page = alloc_page(gfp_mask);
2517 if (likely(page != NULL))
2518 percpu_counter_add(&cc->n_allocated_pages, 1);
2519
2520 return page;
2521}
2522
2523static void crypt_page_free(void *page, void *pool_data)
2524{
2525 struct crypt_config *cc = pool_data;
2526
2527 __free_page(page);
2528 percpu_counter_sub(&cc->n_allocated_pages, 1);
2529}
2530
2531static void crypt_dtr(struct dm_target *ti)
2532{
2533 struct crypt_config *cc = ti->private;
2534
2535 ti->private = NULL;
2536
2537 if (!cc)
2538 return;
2539
2540 if (cc->write_thread)
2541 kthread_stop(cc->write_thread);
2542
2543 if (cc->io_queue)
2544 destroy_workqueue(cc->io_queue);
2545 if (cc->crypt_queue)
2546 destroy_workqueue(cc->crypt_queue);
2547
2548 crypt_free_tfms(cc);
2549
2550 bioset_exit(&cc->bs);
2551
2552 mempool_exit(&cc->page_pool);
2553 mempool_exit(&cc->req_pool);
2554 mempool_exit(&cc->tag_pool);
2555
2556 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2557 percpu_counter_destroy(&cc->n_allocated_pages);
2558
2559 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2560 cc->iv_gen_ops->dtr(cc);
2561
2562 if (cc->dev)
2563 dm_put_device(ti, cc->dev);
2564
2565 kfree_sensitive(cc->cipher_string);
2566 kfree_sensitive(cc->key_string);
2567 kfree_sensitive(cc->cipher_auth);
2568 kfree_sensitive(cc->authenc_key);
2569
2570 mutex_destroy(&cc->bio_alloc_lock);
2571
	/* Must zero key material before freeing */
2573 kfree_sensitive(cc);
2574
2575 spin_lock(&dm_crypt_clients_lock);
2576 WARN_ON(!dm_crypt_clients_n);
2577 dm_crypt_clients_n--;
2578 crypt_calculate_pages_per_client();
2579 spin_unlock(&dm_crypt_clients_lock);
2580}
2581
2582static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2583{
2584 struct crypt_config *cc = ti->private;
2585
2586 if (crypt_integrity_aead(cc))
2587 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2588 else
2589 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2590
2591 if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
2593 cc->iv_size = max(cc->iv_size,
2594 (unsigned int)(sizeof(u64) / sizeof(u8)));
2595 else if (ivmode) {
2596 DMWARN("Selected cipher does not support IVs");
2597 ivmode = NULL;
2598 }
2599

	/* Choose ivmode, see comments at iv code. */
2601 if (ivmode == NULL)
2602 cc->iv_gen_ops = NULL;
2603 else if (strcmp(ivmode, "plain") == 0)
2604 cc->iv_gen_ops = &crypt_iv_plain_ops;
2605 else if (strcmp(ivmode, "plain64") == 0)
2606 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2607 else if (strcmp(ivmode, "plain64be") == 0)
2608 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2609 else if (strcmp(ivmode, "essiv") == 0)
2610 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2611 else if (strcmp(ivmode, "benbi") == 0)
2612 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2613 else if (strcmp(ivmode, "null") == 0)
2614 cc->iv_gen_ops = &crypt_iv_null_ops;
2615 else if (strcmp(ivmode, "eboiv") == 0)
2616 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2617 else if (strcmp(ivmode, "elephant") == 0) {
2618 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2619 cc->key_parts = 2;
2620 cc->key_extra_size = cc->key_size / 2;
2621 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2622 return -EINVAL;
2623 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2624 } else if (strcmp(ivmode, "lmk") == 0) {
2625 cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
2632 if (cc->key_size % cc->key_parts) {
2633 cc->key_parts++;
2634 cc->key_extra_size = cc->key_size / cc->key_parts;
2635 }
2636 } else if (strcmp(ivmode, "tcw") == 0) {
2637 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2638 cc->key_parts += 2;
2639 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2640 } else if (strcmp(ivmode, "random") == 0) {
2641 cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
2643 cc->integrity_iv_size = cc->iv_size;
2644 } else {
2645 ti->error = "Invalid IV mode";
2646 return -EINVAL;
2647 }
2648
2649 return 0;
2650}
2651
/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should probably be done via crypto API calls once they are available.
 */
2657static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2658{
2659 char *start, *end, *mac_alg = NULL;
2660 struct crypto_ahash *mac;
2661
2662 if (!strstarts(cipher_api, "authenc("))
2663 return 0;
2664
2665 start = strchr(cipher_api, '(');
2666 end = strchr(cipher_api, ',');
2667 if (!start || !end || ++start > end)
2668 return -EINVAL;
2669
2670 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2671 if (!mac_alg)
2672 return -ENOMEM;
2673 strncpy(mac_alg, start, end - start);
2674
2675 mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
2676 kfree(mac_alg);
2677
2678 if (IS_ERR(mac))
2679 return PTR_ERR(mac);
2680
2681 cc->key_mac_size = crypto_ahash_digestsize(mac);
2682 crypto_free_ahash(mac);
2683
2684 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2685 if (!cc->authenc_key)
2686 return -ENOMEM;
2687
2688 return 0;
2689}
2690
2691static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2692 char **ivmode, char **ivopts)
2693{
2694 struct crypt_config *cc = ti->private;
2695 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2696 int ret = -EINVAL;
2697
2698 cc->tfms_count = 1;
2699
	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
2704 tmp = &cipher_in[strlen("capi:")];
2705
	/* Separate IV options if present, it can contain another '-' in hash name */
2707 *ivopts = strrchr(tmp, ':');
2708 if (*ivopts) {
2709 **ivopts = '\0';
2710 (*ivopts)++;
2711 }
2712
2713 *ivmode = strrchr(tmp, '-');
2714 if (*ivmode) {
2715 **ivmode = '\0';
2716 (*ivmode)++;
2717 }

	/* The rest is crypto API spec */
2719 cipher_api = tmp;
2720
	/* Alloc AEAD, can be used only in new format. */
2722 if (crypt_integrity_aead(cc)) {
2723 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2724 if (ret < 0) {
2725 ti->error = "Invalid AEAD cipher spec";
			return ret;
2727 }
2728 }
2729
2730 if (*ivmode && !strcmp(*ivmode, "lmk"))
2731 cc->tfms_count = 64;
2732
2733 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2734 if (!*ivopts) {
2735 ti->error = "Digest algorithm missing for ESSIV mode";
2736 return -EINVAL;
2737 }
2738 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2739 cipher_api, *ivopts);
2740 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2741 ti->error = "Cannot allocate cipher string";
2742 return -ENOMEM;
2743 }
2744 cipher_api = buf;
2745 }
2746
2747 cc->key_parts = cc->tfms_count;
2748
	/* Allocate cipher */
2750 ret = crypt_alloc_tfms(cc, cipher_api);
2751 if (ret < 0) {
2752 ti->error = "Error allocating crypto tfm";
2753 return ret;
2754 }
2755
2756 if (crypt_integrity_aead(cc))
2757 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2758 else
2759 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2760
2761 return 0;
2762}
2763
2764static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2765 char **ivmode, char **ivopts)
2766{
2767 struct crypt_config *cc = ti->private;
2768 char *tmp, *cipher, *chainmode, *keycount;
2769 char *cipher_api = NULL;
2770 int ret = -EINVAL;
2771 char dummy;
2772
2773 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2774 ti->error = "Bad cipher specification";
2775 return -EINVAL;
2776 }
2777
	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
2782 tmp = cipher_in;
2783 keycount = strsep(&tmp, "-");
2784 cipher = strsep(&keycount, ":");
2785
2786 if (!keycount)
2787 cc->tfms_count = 1;
2788 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2789 !is_power_of_2(cc->tfms_count)) {
2790 ti->error = "Bad cipher key count specification";
2791 return -EINVAL;
2792 }
2793 cc->key_parts = cc->tfms_count;
2794
2795 chainmode = strsep(&tmp, "-");
2796 *ivmode = strsep(&tmp, ":");
2797 *ivopts = tmp;
2798
	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
2803 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2804 chainmode = "cbc";
2805 *ivmode = "plain";
2806 }
2807
2808 if (strcmp(chainmode, "ecb") && !*ivmode) {
2809 ti->error = "IV mechanism required";
2810 return -EINVAL;
2811 }
2812
2813 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2814 if (!cipher_api)
2815 goto bad_mem;
2816
2817 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2818 if (!*ivopts) {
2819 ti->error = "Digest algorithm missing for ESSIV mode";
2820 kfree(cipher_api);
2821 return -EINVAL;
2822 }
2823 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2824 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2825 } else {
2826 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2827 "%s(%s)", chainmode, cipher);
2828 }
2829 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2830 kfree(cipher_api);
2831 goto bad_mem;
2832 }
2833
	/* Allocate cipher */
2835 ret = crypt_alloc_tfms(cc, cipher_api);
2836 if (ret < 0) {
2837 ti->error = "Error allocating crypto tfm";
2838 kfree(cipher_api);
2839 return ret;
2840 }
2841 kfree(cipher_api);
2842
2843 return 0;
2844bad_mem:
2845 ti->error = "Cannot allocate cipher strings";
2846 return -ENOMEM;
2847}
2848
2849static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2850{
2851 struct crypt_config *cc = ti->private;
2852 char *ivmode = NULL, *ivopts = NULL;
2853 int ret;
2854
2855 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
2856 if (!cc->cipher_string) {
2857 ti->error = "Cannot allocate cipher strings";
2858 return -ENOMEM;
2859 }
2860
2861 if (strstarts(cipher_in, "capi:"))
2862 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
2863 else
2864 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
2865 if (ret)
2866 return ret;
2867
	/* Initialize IV */
2869 ret = crypt_ctr_ivmode(ti, ivmode);
2870 if (ret < 0)
2871 return ret;
2872
	/* Initialize and set key */
2874 ret = crypt_set_key(cc, key);
2875 if (ret < 0) {
2876 ti->error = "Error decoding and setting key";
2877 return ret;
2878 }
2879
	/* Allocate IV */
2881 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
2882 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
2883 if (ret < 0) {
2884 ti->error = "Error creating IV";
2885 return ret;
2886 }
2887 }
2888
	/* Initialize IV (set keys for ESSIV etc) */
2890 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
2891 ret = cc->iv_gen_ops->init(cc);
2892 if (ret < 0) {
2893 ti->error = "Error initialising IV";
2894 return ret;
2895 }
2896 }
2897
	/* wipe the kernel key payload copy */
2899 if (cc->key_string)
2900 memset(cc->key, 0, cc->key_size * sizeof(u8));
2901
2902 return ret;
2903}
2904
2905static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
2906{
2907 struct crypt_config *cc = ti->private;
2908 struct dm_arg_set as;
2909 static const struct dm_arg _args[] = {
2910 {0, 8, "Invalid number of feature args"},
2911 };
2912 unsigned int opt_params, val;
2913 const char *opt_string, *sval;
2914 char dummy;
2915 int ret;
2916
	/* Optional parameters */
2918 as.argc = argc;
2919 as.argv = argv;
2920
2921 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2922 if (ret)
2923 return ret;
2924
2925 while (opt_params--) {
2926 opt_string = dm_shift_arg(&as);
2927 if (!opt_string) {
2928 ti->error = "Not enough feature arguments";
2929 return -EINVAL;
2930 }
2931
2932 if (!strcasecmp(opt_string, "allow_discards"))
2933 ti->num_discard_bios = 1;
2934
2935 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
2936 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
2937
2938 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
2939 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
2940 else if (!strcasecmp(opt_string, "no_read_workqueue"))
2941 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
2942 else if (!strcasecmp(opt_string, "no_write_workqueue"))
2943 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
2944 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
2945 if (val == 0 || val > MAX_TAG_SIZE) {
2946 ti->error = "Invalid integrity arguments";
2947 return -EINVAL;
2948 }
2949 cc->on_disk_tag_size = val;
2950 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
2951 if (!strcasecmp(sval, "aead")) {
2952 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
2953 } else if (strcasecmp(sval, "none")) {
2954 ti->error = "Unknown integrity profile";
2955 return -EINVAL;
2956 }
2957
2958 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
2959 if (!cc->cipher_auth)
2960 return -ENOMEM;
2961 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
2962 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
2963 cc->sector_size > 4096 ||
2964 (cc->sector_size & (cc->sector_size - 1))) {
2965 ti->error = "Invalid feature value for sector_size";
2966 return -EINVAL;
2967 }
2968 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
2969 ti->error = "Device size is not multiple of sector_size feature";
2970 return -EINVAL;
2971 }
2972 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
2973 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
2974 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
2975 else {
2976 ti->error = "Invalid feature arguments";
2977 return -EINVAL;
2978 }
2979 }
2980
2981 return 0;
2982}
2983
2984#ifdef CONFIG_BLK_DEV_ZONED
2985
2986static int crypt_report_zones(struct dm_target *ti,
2987 struct dm_report_zones_args *args, unsigned int nr_zones)
2988{
2989 struct crypt_config *cc = ti->private;
2990 sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
2991
2992 args->start = cc->start;
2993 return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
2994 dm_report_zones_cb, args);
2995}
2996
2997#endif
2998
/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon|encrypted>:<key_description>] <iv_offset> <dev_path> <start>
 */
3003static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3004{
3005 struct crypt_config *cc;
3006 const char *devname = dm_table_device_name(ti->table);
3007 int key_size;
3008 unsigned int align_mask;
3009 unsigned long long tmpll;
3010 int ret;
3011 size_t iv_size_padding, additional_req_size;
3012 char dummy;
3013
3014 if (argc < 5) {
3015 ti->error = "Not enough arguments";
3016 return -EINVAL;
3017 }
3018
3019 key_size = get_key_size(&argv[1]);
3020 if (key_size < 0) {
3021 ti->error = "Cannot parse key size";
3022 return -EINVAL;
3023 }
3024
3025 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3026 if (!cc) {
3027 ti->error = "Cannot allocate encryption context";
3028 return -ENOMEM;
3029 }
3030 cc->key_size = key_size;
3031 cc->sector_size = (1 << SECTOR_SHIFT);
3032 cc->sector_shift = 0;
3033
3034 ti->private = cc;
3035
3036 spin_lock(&dm_crypt_clients_lock);
3037 dm_crypt_clients_n++;
3038 crypt_calculate_pages_per_client();
3039 spin_unlock(&dm_crypt_clients_lock);
3040
3041 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3042 if (ret < 0)
3043 goto bad;
3044
	/* Optional parameters need to be read before cipher constructor */
3046 if (argc > 5) {
3047 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3048 if (ret)
3049 goto bad;
3050 }
3051
3052 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3053 if (ret < 0)
3054 goto bad;
3055
3056 if (crypt_integrity_aead(cc)) {
3057 cc->dmreq_start = sizeof(struct aead_request);
3058 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3059 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3060 } else {
3061 cc->dmreq_start = sizeof(struct skcipher_request);
3062 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3063 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3064 }
3065 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3066
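	/*
	 * The request is followed by struct dm_crypt_request and the IV; pad
	 * so that the IV ends up aligned to the cipher's alignmask.
	 */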
3067 if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
3069 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3070 & align_mask;
3071 } else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
3077 iv_size_padding = align_mask;
3078 }
3079

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
3081 additional_req_size = sizeof(struct dm_crypt_request) +
3082 iv_size_padding + cc->iv_size +
3083 cc->iv_size +
3084 sizeof(uint64_t) +
3085 sizeof(unsigned int);
3086
3087 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3088 if (ret) {
3089 ti->error = "Cannot allocate crypt request mempool";
3090 goto bad;
3091 }
3092
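	/*
	 * dm core allocates this much per-bio data, so struct dm_crypt_io and
	 * the crypto request are normally carved out of the bio itself (see
	 * crypt_map()) rather than allocated separately.
	 */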
3093 cc->per_bio_data_size = ti->per_io_data_size =
3094 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3095 ARCH_KMALLOC_MINALIGN);
3096
3097 ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
3098 if (ret) {
3099 ti->error = "Cannot allocate page mempool";
3100 goto bad;
3101 }
3102
3103 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3104 if (ret) {
3105 ti->error = "Cannot allocate crypt bioset";
3106 goto bad;
3107 }
3108
3109 mutex_init(&cc->bio_alloc_lock);
3110
3111 ret = -EINVAL;
3112 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3113 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3114 ti->error = "Invalid iv_offset sector";
3115 goto bad;
3116 }
3117 cc->iv_offset = tmpll;
3118
3119 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3120 if (ret) {
3121 ti->error = "Device lookup failed";
3122 goto bad;
3123 }
3124
3125 ret = -EINVAL;
3126 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
3127 ti->error = "Invalid device sector";
3128 goto bad;
3129 }
3130 cc->start = tmpll;
3131
	/*
	 * For zoned block devices, we need to preserve the issuer write
	 * ordering. To do so, disable write workqueues and force inline
	 * encryption completion.
	 */
3137 if (bdev_is_zoned(cc->dev->bdev)) {
3138 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3139 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3140 }
3141
3142 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3143 ret = crypt_integrity_ctr(cc, ti);
3144 if (ret)
3145 goto bad;
3146
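		/*
		 * Size the tag mempool so that one entry covers the integrity
		 * tags for up to tag_pool_max_sectors sectors.
		 */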
3147 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3148 if (!cc->tag_pool_max_sectors)
3149 cc->tag_pool_max_sectors = 1;
3150
3151 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3152 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3153 if (ret) {
3154 ti->error = "Cannot allocate integrity tags mempool";
3155 goto bad;
3156 }
3157
3158 cc->tag_pool_max_sectors <<= cc->sector_shift;
3159 }
3160
3161 ret = -ENOMEM;
3162 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3163 if (!cc->io_queue) {
3164 ti->error = "Couldn't create kcryptd io queue";
3165 goto bad;
3166 }
3167
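	/*
	 * With same_cpu_crypt, encryption work stays on the CPU that submitted
	 * the bio; otherwise an unbound workqueue lets it run on any CPU.
	 */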
3168 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3169 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3170 1, devname);
3171 else
3172 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3173 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3174 num_online_cpus(), devname);
3175 if (!cc->crypt_queue) {
3176 ti->error = "Couldn't create kcryptd queue";
3177 goto bad;
3178 }
3179
3180 spin_lock_init(&cc->write_thread_lock);
3181 cc->write_tree = RB_ROOT;
3182
3183 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3184 if (IS_ERR(cc->write_thread)) {
3185 ret = PTR_ERR(cc->write_thread);
3186 cc->write_thread = NULL;
3187 ti->error = "Couldn't spawn write thread";
3188 goto bad;
3189 }
3190 wake_up_process(cc->write_thread);
3191
3192 ti->num_flush_bios = 1;
3193
3194 return 0;
3195
3196bad:
3197 crypt_dtr(ti);
3198 return ret;
3199}
3200
3201static int crypt_map(struct dm_target *ti, struct bio *bio)
3202{
3203 struct dm_crypt_io *io;
3204 struct crypt_config *cc = ti->private;
3205
	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
3211 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3212 bio_op(bio) == REQ_OP_DISCARD)) {
3213 bio_set_dev(bio, cc->dev->bdev);
3214 if (bio_sectors(bio))
3215 bio->bi_iter.bi_sector = cc->start +
3216 dm_target_offset(ti, bio->bi_iter.bi_sector);
3217 return DM_MAPIO_REMAPPED;
3218 }
3219
	/*
	 * Check if bio is too large, split as needed.
	 */
3223 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
3224 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3225 dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
3226
	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
3231 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3232 return DM_MAPIO_KILL;
3233
3234 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3235 return DM_MAPIO_KILL;
3236
3237 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3238 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3239
3240 if (cc->on_disk_tag_size) {
3241 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3242
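		/*
		 * Try to kmalloc the whole tag area; if that fails or is too
		 * large, fall back to the tag mempool and let dm split the bio.
		 */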
3243 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
3244 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
3245 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
3246 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3247 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3248 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3249 io->integrity_metadata_from_pool = true;
3250 }
3251 }
3252
3253 if (crypt_integrity_aead(cc))
3254 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3255 else
3256 io->ctx.r.req = (struct skcipher_request *)(io + 1);
3257
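	/*
	 * Reads are submitted to the device first and decrypted on completion;
	 * writes are encrypted by kcryptd before being submitted.
	 */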
3258 if (bio_data_dir(io->base_bio) == READ) {
3259 if (kcryptd_io_read(io, GFP_NOWAIT))
3260 kcryptd_queue_read(io);
3261 } else
3262 kcryptd_queue_crypt(io);
3263
3264 return DM_MAPIO_SUBMITTED;
3265}
3266
3267static void crypt_status(struct dm_target *ti, status_type_t type,
3268 unsigned status_flags, char *result, unsigned maxlen)
3269{
3270 struct crypt_config *cc = ti->private;
3271 unsigned i, sz = 0;
3272 int num_feature_args = 0;
3273
3274 switch (type) {
3275 case STATUSTYPE_INFO:
3276 result[0] = '\0';
3277 break;
3278
3279 case STATUSTYPE_TABLE:
3280 DMEMIT("%s ", cc->cipher_string);
3281
3282 if (cc->key_size > 0) {
3283 if (cc->key_string)
3284 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3285 else
3286 for (i = 0; i < cc->key_size; i++)
3287 DMEMIT("%02x", cc->key[i]);
3288 } else
3289 DMEMIT("-");
3290
3291 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3292 cc->dev->name, (unsigned long long)cc->start);
3293
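		/* Count the optional feature arguments first, then emit them. */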
3294 num_feature_args += !!ti->num_discard_bios;
3295 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3296 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3297 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3298 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3299 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3300 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3301 if (cc->on_disk_tag_size)
3302 num_feature_args++;
3303 if (num_feature_args) {
3304 DMEMIT(" %d", num_feature_args);
3305 if (ti->num_discard_bios)
3306 DMEMIT(" allow_discards");
3307 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3308 DMEMIT(" same_cpu_crypt");
3309 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3310 DMEMIT(" submit_from_crypt_cpus");
3311 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3312 DMEMIT(" no_read_workqueue");
3313 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3314 DMEMIT(" no_write_workqueue");
3315 if (cc->on_disk_tag_size)
3316 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3317 if (cc->sector_size != (1 << SECTOR_SHIFT))
3318 DMEMIT(" sector_size:%d", cc->sector_size);
3319 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3320 DMEMIT(" iv_large_sectors");
3321 }
3322
3323 break;
3324 }
3325}
3326
3327static void crypt_postsuspend(struct dm_target *ti)
3328{
3329 struct crypt_config *cc = ti->private;
3330
3331 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3332}
3333
3334static int crypt_preresume(struct dm_target *ti)
3335{
3336 struct crypt_config *cc = ti->private;
3337
3338 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3339 DMERR("aborting resume - crypt key is not set.");
3340 return -EAGAIN;
3341 }
3342
3343 return 0;
3344}
3345
3346static void crypt_resume(struct dm_target *ti)
3347{
3348 struct crypt_config *cc = ti->private;
3349
3350 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3351}
3352
/* Message interface
 *	key set <key>
 *	key wipe
 */
3357static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
3358 char *result, unsigned maxlen)
3359{
3360 struct crypt_config *cc = ti->private;
3361 int key_size, ret = -EINVAL;
3362
3363 if (argc < 2)
3364 goto error;
3365
3366 if (!strcasecmp(argv[0], "key")) {
3367 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3368 DMWARN("not suspended during key manipulation.");
3369 return -EINVAL;
3370 }
3371 if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
3373 key_size = get_key_size(&argv[2]);
3374 if (key_size < 0 || cc->key_size != key_size) {
3375 memset(argv[2], '0', strlen(argv[2]));
3376 return -EINVAL;
3377 }
3378
3379 ret = crypt_set_key(cc, argv[2]);
3380 if (ret)
3381 return ret;
3382 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3383 ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
3385 if (cc->key_string)
3386 memset(cc->key, 0, cc->key_size * sizeof(u8));
3387 return ret;
3388 }
3389 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
3390 return crypt_wipe_key(cc);
3391 }
3392
3393error:
3394 DMWARN("unrecognised message received.");
3395 return -EINVAL;
3396}
3397
3398static int crypt_iterate_devices(struct dm_target *ti,
3399 iterate_devices_callout_fn fn, void *data)
3400{
3401 struct crypt_config *cc = ti->private;
3402
3403 return fn(ti, cc->dev, cc->start, ti->len, data);
3404}
3405
3406static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3407{
3408 struct crypt_config *cc = ti->private;
3409
	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
3416 limits->max_segment_size = PAGE_SIZE;
3417
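	/*
	 * Advertise the encryption sector size as the logical/physical block
	 * size and minimum I/O size so upper layers issue suitably aligned I/O.
	 */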
3418 limits->logical_block_size =
3419 max_t(unsigned, limits->logical_block_size, cc->sector_size);
3420 limits->physical_block_size =
3421 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3422 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
3423}
3424
3425static struct target_type crypt_target = {
3426 .name = "crypt",
3427 .version = {1, 22, 0},
3428 .module = THIS_MODULE,
3429 .ctr = crypt_ctr,
3430 .dtr = crypt_dtr,
3431#ifdef CONFIG_BLK_DEV_ZONED
3432 .features = DM_TARGET_ZONED_HM,
3433 .report_zones = crypt_report_zones,
3434#endif
3435 .map = crypt_map,
3436 .status = crypt_status,
3437 .postsuspend = crypt_postsuspend,
3438 .preresume = crypt_preresume,
3439 .resume = crypt_resume,
3440 .message = crypt_message,
3441 .iterate_devices = crypt_iterate_devices,
3442 .io_hints = crypt_io_hints,
3443};
3444
3445static int __init dm_crypt_init(void)
3446{
3447 int r;
3448
3449 r = dm_register_target(&crypt_target);
3450 if (r < 0)
3451 DMERR("register failed %d", r);
3452
3453 return r;
3454}
3455
3456static void __exit dm_crypt_exit(void)
3457{
3458 dm_unregister_target(&crypt_target);
3459}
3460
3461module_init(dm_crypt_init);
3462module_exit(dm_crypt_exit);
3463
3464MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3465MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3466MODULE_LICENSE("GPL");
3467