/*
 * Copyright (C) 2003 Jana Saout <jana@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013-2020 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <keys/trusted-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

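/*
 * context holding the current state of a multi-part conversion
 */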
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
};

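/*
 * per bio private data
 */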
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;
	struct tasklet_struct tasklet;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

#define ELEPHANT_MAX_KEY_SIZE 32
struct iv_elephant_private {
	struct crypto_skcipher *tfm;
};

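/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */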
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
	     DM_CRYPT_WRITE_INLINE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
};

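/*
 * The fields in here must be read only after initialization.
 */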
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_elephant_private elephant;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc() */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	unsigned tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[];
};

#define MIN_IOS		64
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT			2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

static bool crypt_integrity_aead(struct crypt_config *cc);

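/*
 * Use this to access cipher attributes that are independent of the key.
 */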
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

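/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system.
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 *
 * elephant: The extended version of eboiv with additional Elephant diffuser
 *           used with Bitlocker CBC mode.
 */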
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	/* iv_size is at least of size u64; usually it is 16 bytes */
	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	/*
	 * ESSIV encryption of the IV is now handled by the crypto API,
	 * so just pass the plain64 IV here.
	 */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs;
	int log;

	if (crypt_integrity_aead(cc))
		bs = crypto_aead_blocksize(any_tfm_aead(cc));
	else
		bs = crypto_skcipher_blocksize(any_tfm(cc));
	log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen.
	 */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64));

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kfree_sensitive(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0,
					   CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kfree_sensitive(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kfree_sensitive(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
					    CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	/* xor whitening with sector number */
	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	/* Remove whitening from ciphertext */
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	/* Calculate IV */
	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	/* Apply whitening on ciphertext */
	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	/* Used only for writes, there must be an additional space to store IV */
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	if (crypt_integrity_aead(cc)) {
		ti->error = "AEAD transforms not supported for EBOIV";
		return -EINVAL;
	}

	if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does "
			    "not match IV size of block cipher";
		return -EINVAL;
	}

	return 0;
}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
	struct skcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
	if (!req)
		return -ENOMEM;

	memset(buf, 0, cc->iv_size);
	*(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
	sg_init_one(&dst, iv, cc->iv_size);
	skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);

	return err;
}

static void crypt_iv_elephant_dtr(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;

	crypto_free_skcipher(elephant->tfm);
	elephant->tfm = NULL;
}

static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
				 const char *opts)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int r;

	elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
					      CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(elephant->tfm)) {
		r = PTR_ERR(elephant->tfm);
		elephant->tfm = NULL;
		return r;
	}

	r = crypt_iv_eboiv_ctr(cc, ti, NULL);
	if (r)
		crypt_iv_elephant_dtr(cc);
	return r;
}

static void diffuser_disk_to_cpu(u32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = le32_to_cpu((__le32)d[i]);
#endif
}

static void diffuser_cpu_to_disk(__le32 *d, size_t n)
{
#ifndef __LITTLE_ENDIAN
	int i;

	for (i = 0; i < n; i++)
		d[i] = cpu_to_le32((u32)d[i]);
#endif
}

static void diffuser_a_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = 0;
		i2 = n - 2;
		i3 = n - 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_a_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 5; i++) {
		i1 = n - 1;
		i2 = n - 2 - 1;
		i3 = n - 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ (d[i3] << 13 | d[i3] >> 19);
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ (d[i3] << 9 | d[i3] >> 23);
			i1--; i2--; i3--;
		}
	}
}

static void diffuser_b_decrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = 0;
		i2 = 2;
		i3 = 5;

		while (i1 < (n - 1)) {
			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			d[i1] += d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1++; i2++; i3++;

			if (i2 >= n)
				i2 -= n;

			d[i1] += d[i2] ^ d[i3];
			i1++; i2++; i3++;

			if (i3 >= n)
				i3 -= n;

			d[i1] += d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1++; i2++; i3++;
		}
	}
}

static void diffuser_b_encrypt(u32 *d, size_t n)
{
	int i, i1, i2, i3;

	for (i = 0; i < 3; i++) {
		i1 = n - 1;
		i2 = 2 - 1;
		i3 = 5 - 1;

		while (i1 > 0) {
			d[i1] -= d[i2] ^ (d[i3] << 25 | d[i3] >> 7);
			i1--; i2--; i3--;

			if (i3 < 0)
				i3 += n;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;

			if (i2 < 0)
				i2 += n;

			d[i1] -= d[i2] ^ (d[i3] << 10 | d[i3] >> 22);
			i1--; i2--; i3--;

			d[i1] -= d[i2] ^ d[i3];
			i1--; i2--; i3--;
		}
	}
}

static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 *es, *ks, *data, *data2, *data_offset;
	struct skcipher_request *req;
	struct scatterlist *sg, *sg2, src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int i, r;

	req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
	es = kzalloc(16, GFP_NOIO);
	ks = kzalloc(32, GFP_NOIO);

	if (!req || !es || !ks) {
		r = -ENOMEM;
		goto out;
	}

	*(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);

	/* E(Ks, e(s)) */
	sg_init_one(&src, es, 16);
	sg_init_one(&dst, ks, 16);
	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	/* E(Ks, e'(s)) */
	es[15] = 0x80;
	sg_init_one(&dst, &ks[16], 16);
	r = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (r)
		goto out;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	data = kmap_atomic(sg_page(sg));
	data_offset = data + sg->offset;

	/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
		data2 = kmap_atomic(sg_page(sg2));
		memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
		kunmap_atomic(data2);
	}

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	for (i = 0; i < (cc->sector_size / 32); i++)
		crypto_xor(data_offset + i * 32, ks, 32);

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
		diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
	}

	kunmap_atomic(data);
out:
	kfree_sensitive(ks);
	kfree_sensitive(es);
	skcipher_request_free(req);
	return r;
}

static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		r = crypt_iv_elephant(cc, dmreq);
		if (r)
			return r;
	}

	return crypt_iv_eboiv_gen(cc, iv, dmreq);
}

static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return crypt_iv_elephant(cc, dmreq);

	return 0;
}

static int crypt_iv_elephant_init(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	int key_offset = cc->key_size - cc->key_extra_size;

	return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
}

static int crypt_iv_elephant_wipe(struct crypt_config *cc)
{
	struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
	u8 key[ELEPHANT_MAX_KEY_SIZE];

	memset(key, 0, cc->key_extra_size);
	return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static const struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr	   = crypt_iv_eboiv_ctr,
	.generator = crypt_iv_eboiv_gen
};

static const struct crypt_iv_operations crypt_iv_elephant_ops = {
	.ctr	   = crypt_iv_elephant_ctr,
	.dtr	   = crypt_iv_elephant_dtr,
	.init	   = crypt_iv_elephant_init,
	.wipe	   = crypt_iv_elephant_wipe,
	.generator = crypt_iv_elephant_gen,
	.post	   = crypt_iv_elephant_post
};

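/*
 * Integrity extensions
 */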
static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

/* Get sg containing data */
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	/* From now we require underlying device with our integrity profile */
	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;

	return (__le64 *)ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);

	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}

static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*sector));
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	/* Reject unexpected unaligned bio. */
	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	/* For skcipher we use only the first sg item */
	sg_in  = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;
			/* Data can be already preprocessed in generator */
			if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
				sg_in = sg_out;
			/* Store generated IV in integrity metadata */
			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}
		/* Working copy of IV, to be modified in crypto API */
		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static int crypt_alloc_req_skcipher(struct crypt_config *cc,
				    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req) {
		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
		if (!ctx->r.req)
			return -ENOMEM;
	}

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));

	return 0;
}

static int crypt_alloc_req_aead(struct crypt_config *cc,
				struct convert_context *ctx)
{
	if (!ctx->r.req_aead) {
		ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
		if (!ctx->r.req_aead)
			return -ENOMEM;
	}

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));

	return 0;
}

static int crypt_alloc_req(struct crypt_config *cc,
			   struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_req_aead(cc, ctx);
	else
		return crypt_alloc_req_skcipher(cc, ctx);
}

static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}

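/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */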
static blk_status_t crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx, bool atomic, bool reset_pending)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	/*
	 * if reset_pending is set we are dealing with the bio for the first time,
	 * else we're continuing to work on the previous bio, so don't mess with
	 * the cc_pending counter
	 */
	if (reset_pending)
		atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		r = crypt_alloc_req(cc, ctx);
		if (r) {
			complete(&ctx->restart);
			return BLK_STS_DEV_RESOURCE;
		}

		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
		case -EBUSY:
			if (in_interrupt()) {
				if (try_wait_for_completion(&ctx->restart)) {
					/*
					 * we don't have to block to wait for completion,
					 * so proceed
					 */
				} else {
					/*
					 * we can't wait for completion without blocking
					 * exit and continue processing in a workqueue
					 */
					ctx->r.req = NULL;
					ctx->cc_sector += sector_step;
					tag_offset++;
					return BLK_STS_DEV_RESOURCE;
				}
			} else {
				wait_for_completion(&ctx->restart);
			}
			reinit_completion(&ctx->restart);
			fallthrough;
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
		/*
		 * The request was already processed (synchronously).
		 */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			if (!atomic)
				cond_resched();
			continue;
		/*
		 * There was a data integrity error.
		 */
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
		/*
		 * There was an error while processing the request.
		 */
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

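/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE)
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256, pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */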
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
	if (!clone)
		goto out;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bio_add_page(clone, page, len, 0);

		remaining_size -= len;
	}

	/* Allocate space for integrity tags */
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}
out:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, clone, iter_all) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, &cc->page_pool);
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

static void kcryptd_io_bio_endio(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	bio_endio(io->base_bio);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;

	/*
	 * If we are running this function from our tasklet,
	 * we can't call bio_endio() here, because it will call
	 * clone_endio() from dm.c, which in turn will
	 * free the current struct dm_crypt_io structure with
	 * this tasklet. But tasklet code attempts to access the
	 * structure after calling tasklet_unlock, so we get a
	 * use-after-free bug. To fix this issue, defer bio_endio()
	 * to the workqueue instead.
	 */
	if (tasklet_trylock(&io->tasklet)) {
		tasklet_unlock(&io->tasklet);
		bio_endio(base_bio);
		return;
	}

	INIT_WORK(&io->work, kcryptd_io_bio_endio);
	queue_work(cc->io_queue, &io->work);
}
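
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */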
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	blk_status_t error;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_status;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	bio_set_dev(clone, cc->dev->bdev);
	clone->bi_opf = io->base_bio->bi_opf;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt the whole bio
	 * data *afterwards* -- thanks to immutable biovecs we don't need to
	 * worry about the block layer modifying the biovec array; so leverage
	 * bio_clone_fast().
	 */
	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	submit_bio_noacct(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	submit_bio_noacct(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		spin_lock_irq(&cc->write_thread_lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock_irq(&cc->write_thread_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_lock);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
		submit_bio_noacct(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_lock, flags);
	if (RB_EMPTY_ROOT(&cc->write_tree))
		wake_up_process(cc->write_thread);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);
	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}

static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
				       struct convert_context *ctx)

{
	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
		return false;

	/*
	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
	 * constraints so they do not need to be issued inline by
	 * kcryptd_crypt_write_convert().
	 */
	switch (bio_op(ctx->bio_in)) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		return true;
	default:
		return false;
	}
}

static void kcryptd_crypt_write_continue(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	wait_for_completion(&ctx->restart);
	reinit_completion(&ctx->restart);

	r = crypt_convert(cc, &io->ctx, true, false);
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct convert_context *ctx = &io->ctx;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, ctx,
			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_write_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
		/* Wait for completion signaled by kcryptd_async_done() */
		wait_for_completion(&ctx->restart);
		crypt_finished = 1;
	}

	/* Encryption was already finished, submit io now */
	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_continue(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	wait_for_completion(&io->ctx.restart);
	reinit_completion(&io->ctx.restart);

	r = crypt_convert(cc, &io->ctx, true, false);
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx,
			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
	/*
	 * Crypto API backlogged the request, because its queue was full
	 * and we're in softirq context, so continue from a workqueue
	 */
	if (r == BLK_STS_DEV_RESOURCE) {
		INIT_WORK(&io->work, kcryptd_crypt_read_continue);
		queue_work(cc->crypt_queue, &io->work);
		return;
	}
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	/*
	 * The request is fully completed: for inline writes, let
	 * kcryptd_crypt_write_convert() do the IO submission.
	 */
	if (bio_data_dir(io->base_bio) == READ) {
		kcryptd_crypt_read_done(io);
		return;
	}

	if (kcryptd_crypt_write_inline(cc, ctx)) {
		complete(&ctx->restart);
		return;
	}

	kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_crypt_tasklet(unsigned long work)
{
	kcryptd_crypt((struct work_struct *)work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
		/*
		 * in_hardirq(): Crypto API's skcipher_walk_first() refuses to work in
		 * irq context.
		 * irqs_disabled(): the kernel may run some IO completion from the idle
		 * thread, but it is being executed with irqs disabled.
		 */
		if (in_hardirq() || irqs_disabled()) {
			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
			tasklet_schedule(&io->tasklet);
			return;
		}

		kcryptd_crypt(&io->work);
		return;
	}

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
						CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used.  Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
	       crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

2314static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2315{
2316 int err;
2317
2318 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2319 if (!cc->cipher_tfm.tfms)
2320 return -ENOMEM;
2321
2322 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2323 CRYPTO_ALG_ALLOCATES_MEMORY);
2324 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2325 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2326 crypt_free_tfms(cc);
2327 return err;
2328 }
2329
2330 DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
2331 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2332 return 0;
2333}
2334
2335static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2336{
2337 if (crypt_integrity_aead(cc))
2338 return crypt_alloc_tfms_aead(cc, ciphermode);
2339 else
2340 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2341}
2342
2343static unsigned crypt_subkey_size(struct crypt_config *cc)
2344{
2345 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2346}
2347
2348static unsigned crypt_authenckey_size(struct crypt_config *cc)
2349{
2350 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
2351}
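/*
 * Worked example (illustrative values only): for a 64-byte table key used
 * with authenc(hmac(sha256),cbc(aes)), tfms_count == 1 and
 * key_extra_size == 0, so crypt_subkey_size() is 64; crypt_authenckey_size()
 * adds the RTA_SPACE() overhead for the rtattr header and key parameter
 * that crypt_copy_authenckey() below writes in front of the keys.
 */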
2352
2353 /*
2354 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
2355 * the key must be converted to a special format; this helper
2356 * converts cc->key into that format.
2357 */
2358static void crypt_copy_authenckey(char *p, const void *key,
2359 unsigned enckeylen, unsigned authkeylen)
2360{
2361 struct crypto_authenc_key_param *param;
2362 struct rtattr *rta;
2363
2364 rta = (struct rtattr *)p;
2365 param = RTA_DATA(rta);
2366 param->enckeylen = cpu_to_be32(enckeylen);
2367 rta->rta_len = RTA_LENGTH(sizeof(*param));
2368 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
2369 p += RTA_SPACE(sizeof(*param));
2370 memcpy(p, key + enckeylen, authkeylen);
2371 p += authkeylen;
2372 memcpy(p, key, enckeylen);
2373}
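/*
 * Resulting layout of cc->authenc_key as consumed by crypto_aead_setkey()
 * for authenc() transforms:
 *
 *   | rtattr CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) | auth key | enc key |
 *
 * The source key is ordered "enc key | auth key"; the helper swaps the two
 * parts and prepends the rtattr parameter block.
 */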
2374
2375static int crypt_setkey(struct crypt_config *cc)
2376{
2377 unsigned subkey_size;
2378 int err = 0, i, r;
2379
2380 /* Ignore extra keys (which are used for IV etc) */
2381 subkey_size = crypt_subkey_size(cc);
2382
2383 if (crypt_integrity_hmac(cc)) {
2384 if (subkey_size < cc->key_mac_size)
2385 return -EINVAL;
2386
2387 crypt_copy_authenckey(cc->authenc_key, cc->key,
2388 subkey_size - cc->key_mac_size,
2389 cc->key_mac_size);
2390 }
2391
2392 for (i = 0; i < cc->tfms_count; i++) {
2393 if (crypt_integrity_hmac(cc))
2394 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2395 cc->authenc_key, crypt_authenckey_size(cc));
2396 else if (crypt_integrity_aead(cc))
2397 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2398 cc->key + (i * subkey_size),
2399 subkey_size);
2400 else
2401 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2402 cc->key + (i * subkey_size),
2403 subkey_size);
2404 if (r)
2405 err = r;
2406 }
2407
2408 if (crypt_integrity_hmac(cc))
2409 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2410
2411 return err;
2412}
2413
2414#ifdef CONFIG_KEYS
2415
2416static bool contains_whitespace(const char *str)
2417{
2418 while (*str)
2419 if (isspace(*str++))
2420 return true;
2421 return false;
2422}
2423
2424static int set_key_user(struct crypt_config *cc, struct key *key)
2425{
2426 const struct user_key_payload *ukp;
2427
2428 ukp = user_key_payload_locked(key);
2429 if (!ukp)
2430 return -EKEYREVOKED;
2431
2432 if (cc->key_size != ukp->datalen)
2433 return -EINVAL;
2434
2435 memcpy(cc->key, ukp->data, cc->key_size);
2436
2437 return 0;
2438}
2439
2440static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2441{
2442 const struct encrypted_key_payload *ekp;
2443
2444 ekp = key->payload.data[0];
2445 if (!ekp)
2446 return -EKEYREVOKED;
2447
2448 if (cc->key_size != ekp->decrypted_datalen)
2449 return -EINVAL;
2450
2451 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2452
2453 return 0;
2454}
2455
2456static int set_key_trusted(struct crypt_config *cc, struct key *key)
2457{
2458 const struct trusted_key_payload *tkp;
2459
2460 tkp = key->payload.data[0];
2461 if (!tkp)
2462 return -EKEYREVOKED;
2463
2464 if (cc->key_size != tkp->key_len)
2465 return -EINVAL;
2466
2467 memcpy(cc->key, tkp->key, cc->key_size);
2468
2469 return 0;
2470}
2471
2472static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2473{
2474 char *new_key_string, *key_desc;
2475 int ret;
2476 struct key_type *type;
2477 struct key *key;
2478 int (*set_key)(struct crypt_config *cc, struct key *key);
2479
2480 /*
2481 * Reject key_string with whitespace. dm core currently lacks code for
2482 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
2483 */
2484 if (contains_whitespace(key_string)) {
2485 DMERR("whitespace chars not allowed in key string");
2486 return -EINVAL;
2487 }
2488
2489 /* look for next ':' separating key_type from key_description */
2490 key_desc = strpbrk(key_string, ":");
2491 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2492 return -EINVAL;
2493
2494 if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
2495 type = &key_type_logon;
2496 set_key = set_key_user;
2497 } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
2498 type = &key_type_user;
2499 set_key = set_key_user;
2500 } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) &&
2501 !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
2502 type = &key_type_encrypted;
2503 set_key = set_key_encrypted;
2504 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
2505 !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
2506 type = &key_type_trusted;
2507 set_key = set_key_trusted;
2508 } else {
2509 return -EINVAL;
2510 }
2511
2512 new_key_string = kstrdup(key_string, GFP_KERNEL);
2513 if (!new_key_string)
2514 return -ENOMEM;
2515
2516 key = request_key(type, key_desc + 1, NULL);
2517 if (IS_ERR(key)) {
2518 kfree_sensitive(new_key_string);
2519 return PTR_ERR(key);
2520 }
2521
2522 down_read(&key->sem);
2523
2524 ret = set_key(cc, key);
2525 if (ret < 0) {
2526 up_read(&key->sem);
2527 key_put(key);
2528 kfree_sensitive(new_key_string);
2529 return ret;
2530 }
2531
2532 up_read(&key->sem);
2533 key_put(key);
2534
2535 /* clear the flag since following operations may invalidate previously valid key */
2536 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2537
2538 ret = crypt_setkey(cc);
2539
2540 if (!ret) {
2541 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2542 kfree_sensitive(cc->key_string);
2543 cc->key_string = new_key_string;
2544 } else
2545 kfree_sensitive(new_key_string);
2546
2547 return ret;
2548}
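/*
 * Example keyring references accepted above (key descriptions are
 * hypothetical):
 *
 *   logon:my_prefix:my_key
 *   user:dmcrypt_key
 *   encrypted:evm_key    (only with CONFIG_ENCRYPTED_KEYS)
 *   trusted:tpm_key      (only with CONFIG_TRUSTED_KEYS)
 */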
2549
2550static int get_key_size(char **key_string)
2551{
2552 char *colon, dummy;
2553 int ret;
2554
2555 if (*key_string[0] != ':')
2556 return strlen(*key_string) >> 1;
2557
2558 /* look for next ':' in key string */
2559 colon = strpbrk(*key_string + 1, ":");
2560 if (!colon)
2561 return -EINVAL;
2562
2563 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2564 return -EINVAL;
2565
2566 *key_string = colon;
2567
2568 /* remaining key string should be :<key_type>:<key_description> */
2569
2570 return ret;
2571}
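/*
 * Example (hypothetical): for ":32:logon:my_prefix:my_key" this returns 32
 * and advances *key_string to ":logon:my_prefix:my_key"; a plain hex key
 * instead yields half its string length (two hex digits per byte).
 */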
2572
2573#else
2574
2575static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2576{
2577 return -EINVAL;
2578}
2579
2580static int get_key_size(char **key_string)
2581{
2582 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2583}
2584
2585#endif
2586
2587static int crypt_set_key(struct crypt_config *cc, char *key)
2588{
2589 int r = -EINVAL;
2590 int key_string_len = strlen(key);
2591
2592 /* Hyphen (which gives a key_size of zero) means there is no key. */
2593 if (!cc->key_size && strcmp(key, "-"))
2594 goto out;
2595
2596 /* ':' means the key is in kernel keyring, short-circuit normal key processing */
2597 if (key[0] == ':') {
2598 r = crypt_set_keyring_key(cc, key + 1);
2599 goto out;
2600 }
2601
2602 /* clear the flag since following operations may invalidate previously valid key */
2603 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2604
2605 /* wipe references to any kernel keyring key */
2606 kfree_sensitive(cc->key_string);
2607 cc->key_string = NULL;
2608
2609 /* decode the key from its hex representation */
2610 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2611 goto out;
2612
2613 r = crypt_setkey(cc);
2614 if (!r)
2615 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2616
2617out:
2618 /* The hex key string is not needed after the key is set, wipe it. */
2619 memset(key, '0', key_string_len);
2620
2621 return r;
2622}
2623
2624static int crypt_wipe_key(struct crypt_config *cc)
2625{
2626 int r;
2627
2628 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2629 get_random_bytes(&cc->key, cc->key_size);
2630
2631 /* Wipe IV private keys */
2632 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2633 r = cc->iv_gen_ops->wipe(cc);
2634 if (r)
2635 return r;
2636 }
2637
2638 kfree_sensitive(cc->key_string);
2639 cc->key_string = NULL;
2640 r = crypt_setkey(cc);
2641 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2642
2643 return r;
2644}
2645
2646static void crypt_calculate_pages_per_client(void)
2647{
2648 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2649
2650 if (!dm_crypt_clients_n)
2651 return;
2652
2653 pages /= dm_crypt_clients_n;
2654 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2655 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2656 dm_crypt_pages_per_client = pages;
2657}
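/*
 * Illustrative numbers, assuming DM_CRYPT_MEMORY_PERCENT is 2: with 8 GiB
 * of non-highmem pages and 4 dm-crypt clients, each client is budgeted
 * 8 GiB * 2% / 4 = ~41 MiB of pages, bounded from below by
 * DM_CRYPT_MIN_PAGES_PER_CLIENT.
 */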
2658
2659static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2660{
2661 struct crypt_config *cc = pool_data;
2662 struct page *page;
2663
2664 /*
2665 * Note, percpu_counter_read_positive() may over (and under) estimate
2666 * the current usage by at most (batch - 1) * num_online_cpus() pages,
2667 * but avoids potential spinlock contention of an exact result.
2668 */
2669 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2670 likely(gfp_mask & __GFP_NORETRY))
2671 return NULL;
2672
2673 page = alloc_page(gfp_mask);
2674 if (likely(page != NULL))
2675 percpu_counter_add(&cc->n_allocated_pages, 1);
2676
2677 return page;
2678}
2679
2680static void crypt_page_free(void *page, void *pool_data)
2681{
2682 struct crypt_config *cc = pool_data;
2683
2684 __free_page(page);
2685 percpu_counter_sub(&cc->n_allocated_pages, 1);
2686}
2687
2688static void crypt_dtr(struct dm_target *ti)
2689{
2690 struct crypt_config *cc = ti->private;
2691
2692 ti->private = NULL;
2693
2694 if (!cc)
2695 return;
2696
2697 if (cc->write_thread)
2698 kthread_stop(cc->write_thread);
2699
2700 if (cc->io_queue)
2701 destroy_workqueue(cc->io_queue);
2702 if (cc->crypt_queue)
2703 destroy_workqueue(cc->crypt_queue);
2704
2705 crypt_free_tfms(cc);
2706
2707 bioset_exit(&cc->bs);
2708
2709 mempool_exit(&cc->page_pool);
2710 mempool_exit(&cc->req_pool);
2711 mempool_exit(&cc->tag_pool);
2712
2713 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2714 percpu_counter_destroy(&cc->n_allocated_pages);
2715
2716 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2717 cc->iv_gen_ops->dtr(cc);
2718
2719 if (cc->dev)
2720 dm_put_device(ti, cc->dev);
2721
2722 kfree_sensitive(cc->cipher_string);
2723 kfree_sensitive(cc->key_string);
2724 kfree_sensitive(cc->cipher_auth);
2725 kfree_sensitive(cc->authenc_key);
2726
2727 mutex_destroy(&cc->bio_alloc_lock);
2728
2729 /* Must zero key material before freeing */
2730 kfree_sensitive(cc);
2731
2732 spin_lock(&dm_crypt_clients_lock);
2733 WARN_ON(!dm_crypt_clients_n);
2734 dm_crypt_clients_n--;
2735 crypt_calculate_pages_per_client();
2736 spin_unlock(&dm_crypt_clients_lock);
2737}
2738
2739static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2740{
2741 struct crypt_config *cc = ti->private;
2742
2743 if (crypt_integrity_aead(cc))
2744 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2745 else
2746 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2747
2748 if (cc->iv_size)
2749 /* at least a 64 bit sector number should fit in our buffer */
2750 cc->iv_size = max(cc->iv_size,
2751 (unsigned int)(sizeof(u64) / sizeof(u8)));
2752 else if (ivmode) {
2753 DMWARN("Selected cipher does not support IVs");
2754 ivmode = NULL;
2755 }
2756
2757 /* Choose ivmode, see comments at iv code. */
2758 if (ivmode == NULL)
2759 cc->iv_gen_ops = NULL;
2760 else if (strcmp(ivmode, "plain") == 0)
2761 cc->iv_gen_ops = &crypt_iv_plain_ops;
2762 else if (strcmp(ivmode, "plain64") == 0)
2763 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2764 else if (strcmp(ivmode, "plain64be") == 0)
2765 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2766 else if (strcmp(ivmode, "essiv") == 0)
2767 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2768 else if (strcmp(ivmode, "benbi") == 0)
2769 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2770 else if (strcmp(ivmode, "null") == 0)
2771 cc->iv_gen_ops = &crypt_iv_null_ops;
2772 else if (strcmp(ivmode, "eboiv") == 0)
2773 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2774 else if (strcmp(ivmode, "elephant") == 0) {
2775 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2776 cc->key_parts = 2;
2777 cc->key_extra_size = cc->key_size / 2;
2778 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2779 return -EINVAL;
2780 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2781 } else if (strcmp(ivmode, "lmk") == 0) {
2782 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2783 /*
2784 * Versions 2 and 3 are recognised according to the length
2785 * of the provided multi-key string.
2786 * If present (version 3), the last key is used as IV seed.
2787 * Otherwise all keys are used as a whole and key_extra_size stays 0.
2788 */
2789 if (cc->key_size % cc->key_parts) {
2790 cc->key_parts++;
2791 cc->key_extra_size = cc->key_size / cc->key_parts;
2792 }
2793 } else if (strcmp(ivmode, "tcw") == 0) {
2794 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2795 cc->key_parts += 2;
2796 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2797 } else if (strcmp(ivmode, "random") == 0) {
2798 cc->iv_gen_ops = &crypt_iv_random_ops;
2799 /* Need storage space in integrity fields. */
2800 cc->integrity_iv_size = cc->iv_size;
2801 } else {
2802 ti->error = "Invalid IV mode";
2803 return -EINVAL;
2804 }
2805
2806 return 0;
2807}
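/*
 * Example cipher strings and the IV generator selected above (keys and
 * devices omitted): "aes-cbc-essiv:sha256" -> crypt_iv_essiv_ops,
 * "aes-xts-plain64" -> crypt_iv_plain64_ops, "aes-cbc-lmk" (loop-AES
 * compatible) -> crypt_iv_lmk_ops, "aes-cbc-tcw" (TrueCrypt compatible)
 * -> crypt_iv_tcw_ops.
 */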
2808
2809 /*
2810 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
2811 * The HMAC is needed to calculate tag size (HMAC digest size).
2812 * This should probably be done by crypto-api calls (once available).
2813 */
2814static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2815{
2816 char *start, *end, *mac_alg = NULL;
2817 struct crypto_ahash *mac;
2818
2819 if (!strstarts(cipher_api, "authenc("))
2820 return 0;
2821
2822 start = strchr(cipher_api, '(');
2823 end = strchr(cipher_api, ',');
2824 if (!start || !end || ++start > end)
2825 return -EINVAL;
2826
2827 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2828 if (!mac_alg)
2829 return -ENOMEM;
2830 strncpy(mac_alg, start, end - start);
2831
2832 mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
2833 kfree(mac_alg);
2834
2835 if (IS_ERR(mac))
2836 return PTR_ERR(mac);
2837
2838 cc->key_mac_size = crypto_ahash_digestsize(mac);
2839 crypto_free_ahash(mac);
2840
2841 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2842 if (!cc->authenc_key)
2843 return -ENOMEM;
2844
2845 return 0;
2846}
2847
2848static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
2849 char **ivmode, char **ivopts)
2850{
2851 struct crypt_config *cc = ti->private;
2852 char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
2853 int ret = -EINVAL;
2854
2855 cc->tfms_count = 1;
2856
2857 /*
2858 * New cipher format with capi: prefix:
2859 * capi:cipher_api_spec-iv:ivopts
2860 */
2861 tmp = &cipher_in[strlen("capi:")];
2862
2863 /* Separate IV options if present, it can contain another '-' in hash name */
2864 *ivopts = strrchr(tmp, ':');
2865 if (*ivopts) {
2866 **ivopts = '\0';
2867 (*ivopts)++;
2868 }
2869
2870 *ivmode = strrchr(tmp, '-');
2871 if (*ivmode) {
2872 **ivmode = '\0';
2873 (*ivmode)++;
2874 }
2875
2876 cipher_api = tmp;
2877
2878 /* AEAD mode: parse the embedded MAC algorithm and size the authenc key */
2879 if (crypt_integrity_aead(cc)) {
2880 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2881 if (ret < 0) {
2882 ti->error = "Invalid AEAD cipher spec";
2883 return -ENOMEM;
2884 }
2885 }
2886
2887 if (*ivmode && !strcmp(*ivmode, "lmk"))
2888 cc->tfms_count = 64;
2889
2890 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2891 if (!*ivopts) {
2892 ti->error = "Digest algorithm missing for ESSIV mode";
2893 return -EINVAL;
2894 }
2895 ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
2896 cipher_api, *ivopts);
2897 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2898 ti->error = "Cannot allocate cipher string";
2899 return -ENOMEM;
2900 }
2901 cipher_api = buf;
2902 }
2903
2904 cc->key_parts = cc->tfms_count;
2905
2906 /* Allocate cipher */
2907 ret = crypt_alloc_tfms(cc, cipher_api);
2908 if (ret < 0) {
2909 ti->error = "Error allocating crypto tfm";
2910 return ret;
2911 }
2912
2913 if (crypt_integrity_aead(cc))
2914 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2915 else
2916 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2917
2918 return 0;
2919}
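/*
 * Parsing example for the new format, using the hypothetical spec
 * "capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256": ivopts becomes
 * "sha256" (after the last ':'), ivmode becomes "essiv" (after the last
 * '-'), and cipher_api is rewritten to
 * "essiv(authenc(hmac(sha256),cbc(aes)),sha256)" before tfm allocation.
 */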
2920
2921static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
2922 char **ivmode, char **ivopts)
2923{
2924 struct crypt_config *cc = ti->private;
2925 char *tmp, *cipher, *chainmode, *keycount;
2926 char *cipher_api = NULL;
2927 int ret = -EINVAL;
2928 char dummy;
2929
2930 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
2931 ti->error = "Bad cipher specification";
2932 return -EINVAL;
2933 }
2934
2935 /*
2936 * Legacy dm-crypt cipher specification:
2937 * cipher[:keycount]-mode-iv:ivopts
2938 */
2939 tmp = cipher_in;
2940 keycount = strsep(&tmp, "-");
2941 cipher = strsep(&keycount, ":");
2942
2943 if (!keycount)
2944 cc->tfms_count = 1;
2945 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
2946 !is_power_of_2(cc->tfms_count)) {
2947 ti->error = "Bad cipher key count specification";
2948 return -EINVAL;
2949 }
2950 cc->key_parts = cc->tfms_count;
2951
2952 chainmode = strsep(&tmp, "-");
2953 *ivmode = strsep(&tmp, ":");
2954 *ivopts = tmp;
2955
2956 /*
2957 * For compatibility with the original dm-crypt mapping format, if
2958 * only the cipher name is supplied, use cbc-plain.
2959 */
2960 if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
2961 chainmode = "cbc";
2962 *ivmode = "plain";
2963 }
2964
2965 if (strcmp(chainmode, "ecb") && !*ivmode) {
2966 ti->error = "IV mechanism required";
2967 return -EINVAL;
2968 }
2969
2970 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
2971 if (!cipher_api)
2972 goto bad_mem;
2973
2974 if (*ivmode && !strcmp(*ivmode, "essiv")) {
2975 if (!*ivopts) {
2976 ti->error = "Digest algorithm missing for ESSIV mode";
2977 kfree(cipher_api);
2978 return -EINVAL;
2979 }
2980 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2981 "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
2982 } else {
2983 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
2984 "%s(%s)", chainmode, cipher);
2985 }
2986 if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
2987 kfree(cipher_api);
2988 goto bad_mem;
2989 }
2990
2991 /* Allocate cipher */
2992 ret = crypt_alloc_tfms(cc, cipher_api);
2993 if (ret < 0) {
2994 ti->error = "Error allocating crypto tfm";
2995 kfree(cipher_api);
2996 return ret;
2997 }
2998 kfree(cipher_api);
2999
3000 return 0;
3001bad_mem:
3002 ti->error = "Cannot allocate cipher strings";
3003 return -ENOMEM;
3004}
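/*
 * Parsing example for the legacy format: a hypothetical "aes:64-cbc-lmk"
 * yields cipher "aes", tfms_count 64, chainmode "cbc" and ivmode "lmk",
 * producing the crypto API string "cbc(aes)"; a bare "aes" defaults to
 * "cbc(aes)" with ivmode "plain" for backward compatibility.
 */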
3005
3006static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
3007{
3008 struct crypt_config *cc = ti->private;
3009 char *ivmode = NULL, *ivopts = NULL;
3010 int ret;
3011
3012 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3013 if (!cc->cipher_string) {
3014 ti->error = "Cannot allocate cipher strings";
3015 return -ENOMEM;
3016 }
3017
3018 if (strstarts(cipher_in, "capi:"))
3019 ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
3020 else
3021 ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
3022 if (ret)
3023 return ret;
3024
3025 /* Initialize IV */
3026 ret = crypt_ctr_ivmode(ti, ivmode);
3027 if (ret < 0)
3028 return ret;
3029
3030 /* Initialize and set key */
3031 ret = crypt_set_key(cc, key);
3032 if (ret < 0) {
3033 ti->error = "Error decoding and setting key";
3034 return ret;
3035 }
3036
3037 /* Allocate IV */
3038 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3039 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3040 if (ret < 0) {
3041 ti->error = "Error creating IV";
3042 return ret;
3043 }
3044 }
3045
3046 /* Initialize IV (set keys for ESSIV etc) */
3047 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3048 ret = cc->iv_gen_ops->init(cc);
3049 if (ret < 0) {
3050 ti->error = "Error initialising IV";
3051 return ret;
3052 }
3053 }
3054
3055 /* wipe the kernel key payload copy */
3056 if (cc->key_string)
3057 memset(cc->key, 0, cc->key_size * sizeof(u8));
3058
3059 return ret;
3060}
3061
3062static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
3063{
3064 struct crypt_config *cc = ti->private;
3065 struct dm_arg_set as;
3066 static const struct dm_arg _args[] = {
3067 {0, 8, "Invalid number of feature args"},
3068 };
3069 unsigned int opt_params, val;
3070 const char *opt_string, *sval;
3071 char dummy;
3072 int ret;
3073
3074 /* Optional parameters */
3075 as.argc = argc;
3076 as.argv = argv;
3077
3078 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
3079 if (ret)
3080 return ret;
3081
3082 while (opt_params--) {
3083 opt_string = dm_shift_arg(&as);
3084 if (!opt_string) {
3085 ti->error = "Not enough feature arguments";
3086 return -EINVAL;
3087 }
3088
3089 if (!strcasecmp(opt_string, "allow_discards"))
3090 ti->num_discard_bios = 1;
3091
3092 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
3093 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3094
3095 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
3096 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3097 else if (!strcasecmp(opt_string, "no_read_workqueue"))
3098 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3099 else if (!strcasecmp(opt_string, "no_write_workqueue"))
3100 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3101 else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
3102 if (val == 0 || val > MAX_TAG_SIZE) {
3103 ti->error = "Invalid integrity arguments";
3104 return -EINVAL;
3105 }
3106 cc->on_disk_tag_size = val;
3107 sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
3108 if (!strcasecmp(sval, "aead")) {
3109 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3110 } else if (strcasecmp(sval, "none")) {
3111 ti->error = "Unknown integrity profile";
3112 return -EINVAL;
3113 }
3114
3115 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3116 if (!cc->cipher_auth)
3117 return -ENOMEM;
3118 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3119 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3120 cc->sector_size > 4096 ||
3121 (cc->sector_size & (cc->sector_size - 1))) {
3122 ti->error = "Invalid feature value for sector_size";
3123 return -EINVAL;
3124 }
3125 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3126 ti->error = "Device size is not multiple of sector_size feature";
3127 return -EINVAL;
3128 }
3129 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3130 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
3131 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3132 else {
3133 ti->error = "Invalid feature arguments";
3134 return -EINVAL;
3135 }
3136 }
3137
3138 return 0;
3139}
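/*
 * Example feature-argument tails (hypothetical): "1 allow_discards" or
 * "3 sector_size:4096 iv_large_sectors same_cpu_crypt"; the leading count
 * is validated against _args above (at most 8 feature args).
 */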
3140
3141#ifdef CONFIG_BLK_DEV_ZONED
3142static int crypt_report_zones(struct dm_target *ti,
3143 struct dm_report_zones_args *args, unsigned int nr_zones)
3144{
3145 struct crypt_config *cc = ti->private;
3146
3147 return dm_report_zones(cc->dev->bdev, cc->start,
3148 cc->start + dm_target_offset(ti, args->next_sector),
3149 args, nr_zones);
3150}
3151#else
3152#define crypt_report_zones NULL
3153#endif
3154
3155 /*
3156 * Construct an encryption mapping:
3157 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
3158 */
3159static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3160{
3161 struct crypt_config *cc;
3162 const char *devname = dm_table_device_name(ti->table);
3163 int key_size;
3164 unsigned int align_mask;
3165 unsigned long long tmpll;
3166 int ret;
3167 size_t iv_size_padding, additional_req_size;
3168 char dummy;
3169
3170 if (argc < 5) {
3171 ti->error = "Not enough arguments";
3172 return -EINVAL;
3173 }
3174
3175 key_size = get_key_size(&argv[1]);
3176 if (key_size < 0) {
3177 ti->error = "Cannot parse key size";
3178 return -EINVAL;
3179 }
3180
3181 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3182 if (!cc) {
3183 ti->error = "Cannot allocate encryption context";
3184 return -ENOMEM;
3185 }
3186 cc->key_size = key_size;
3187 cc->sector_size = (1 << SECTOR_SHIFT);
3188 cc->sector_shift = 0;
3189
3190 ti->private = cc;
3191
3192 spin_lock(&dm_crypt_clients_lock);
3193 dm_crypt_clients_n++;
3194 crypt_calculate_pages_per_client();
3195 spin_unlock(&dm_crypt_clients_lock);
3196
3197 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3198 if (ret < 0)
3199 goto bad;
3200
3201 /* Optional parameters need to be read before cipher constructor */
3202 if (argc > 5) {
3203 ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3204 if (ret)
3205 goto bad;
3206 }
3207
3208 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
3209 if (ret < 0)
3210 goto bad;
3211
3212 if (crypt_integrity_aead(cc)) {
3213 cc->dmreq_start = sizeof(struct aead_request);
3214 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3215 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3216 } else {
3217 cc->dmreq_start = sizeof(struct skcipher_request);
3218 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3219 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3220 }
3221 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3222
3223 if (align_mask < CRYPTO_MINALIGN) {
3224 /* Allocate the padding exactly */
3225 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3226 & align_mask;
3227 } else {
3228 /*
3229 * If the cipher requires greater alignment than kmalloc
3230 * alignment, we don't know the exact position of the
3231 * initialization vector. We must assume worst case.
3232 */
3233 iv_size_padding = align_mask;
3234 }
3235
3236 /* ...| IV + padding | original IV | original sec. number | bio tag offset |... */
3237 additional_req_size = sizeof(struct dm_crypt_request) +
3238 iv_size_padding + cc->iv_size +
3239 cc->iv_size +
3240 sizeof(uint64_t) +
3241 sizeof(unsigned int);
3242
3243 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3244 if (ret) {
3245 ti->error = "Cannot allocate crypt request mempool";
3246 goto bad;
3247 }
3248
3249 cc->per_bio_data_size = ti->per_io_data_size =
3250 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3251 ARCH_KMALLOC_MINALIGN);
3252
3253 ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3254 if (ret) {
3255 ti->error = "Cannot allocate page mempool";
3256 goto bad;
3257 }
3258
3259 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3260 if (ret) {
3261 ti->error = "Cannot allocate crypt bioset";
3262 goto bad;
3263 }
3264
3265 mutex_init(&cc->bio_alloc_lock);
3266
3267 ret = -EINVAL;
3268 if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
3269 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3270 ti->error = "Invalid iv_offset sector";
3271 goto bad;
3272 }
3273 cc->iv_offset = tmpll;
3274
3275 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3276 if (ret) {
3277 ti->error = "Device lookup failed";
3278 goto bad;
3279 }
3280
3281 ret = -EINVAL;
3282 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
3283 ti->error = "Invalid device sector";
3284 goto bad;
3285 }
3286 cc->start = tmpll;
3287
3288 if (bdev_is_zoned(cc->dev->bdev)) {
3289 /*
3290 * For zoned block devices, we need to preserve the issuer write
3291 * ordering. To do so, disable write workqueues and force inline
3292 * encryption completion.
3293 */
3294 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3295 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3296
3297 /*
3298 * All zone append writes to a zone of a zoned block device will
3299 * have the same BIO sector, the start of the zone. When the
3300 * cipher IV mode uses sector values, all data targeting a
3301 * zone will be encrypted using the first sector number of the
3302 * zone. This will not result in write errors but will
3303 * cause most reads to fail, as reads will use the sector values
3304 * for the actual data location, resulting in IV mismatch.
3305 * To avoid this problem, ask DM core to emulate zone append
3306 * operations by converting them into regular writes.
3307 */
3308 DMDEBUG("Zone append operations will be emulated");
3309 ti->emulate_zone_append = true;
3310 }
3311
3312 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3313 ret = crypt_integrity_ctr(cc, ti);
3314 if (ret)
3315 goto bad;
3316
3317 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3318 if (!cc->tag_pool_max_sectors)
3319 cc->tag_pool_max_sectors = 1;
3320
3321 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3322 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3323 if (ret) {
3324 ti->error = "Cannot allocate integrity tags mempool";
3325 goto bad;
3326 }
3327
3328 cc->tag_pool_max_sectors <<= cc->sector_shift;
3329 }
3330
3331 ret = -ENOMEM;
3332 cc->io_queue = alloc_workqueue("kcryptd_io/%s", WQ_MEM_RECLAIM, 1, devname);
3333 if (!cc->io_queue) {
3334 ti->error = "Couldn't create kcryptd io queue";
3335 goto bad;
3336 }
3337
3338 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3339 cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
3340 1, devname);
3341 else
3342 cc->crypt_queue = alloc_workqueue("kcryptd/%s",
3343 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
3344 num_online_cpus(), devname);
3345 if (!cc->crypt_queue) {
3346 ti->error = "Couldn't create kcryptd queue";
3347 goto bad;
3348 }
3349
3350 spin_lock_init(&cc->write_thread_lock);
3351 cc->write_tree = RB_ROOT;
3352
3353 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3354 if (IS_ERR(cc->write_thread)) {
3355 ret = PTR_ERR(cc->write_thread);
3356 cc->write_thread = NULL;
3357 ti->error = "Couldn't spawn write thread";
3358 goto bad;
3359 }
3360 wake_up_process(cc->write_thread);
3361
3362 ti->num_flush_bios = 1;
3363 ti->limit_swap_bios = true;
3364
3365 return 0;
3366
3367bad:
3368 crypt_dtr(ti);
3369 return ret;
3370}
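/*
 * Example construction via dmsetup (hypothetical key and device):
 *
 *   echo "0 409600 crypt aes-xts-plain64 <64 hex digits> 0 /dev/sdb 0" | \
 *       dmsetup create encrypted_disk
 *
 * argv[0] is the cipher, argv[1] the key (or a :<size>:<type>:<desc>
 * keyring reference), argv[2] the iv_offset, argv[3] the device and
 * argv[4] the start sector, optionally followed by the feature args
 * handled by crypt_ctr_optional().
 */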
3371
3372static int crypt_map(struct dm_target *ti, struct bio *bio)
3373{
3374 struct dm_crypt_io *io;
3375 struct crypt_config *cc = ti->private;
3376
3377 /*
3378 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
3379 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
3380 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
3381 */
3382 if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
3383 bio_op(bio) == REQ_OP_DISCARD)) {
3384 bio_set_dev(bio, cc->dev->bdev);
3385 if (bio_sectors(bio))
3386 bio->bi_iter.bi_sector = cc->start +
3387 dm_target_offset(ti, bio->bi_iter.bi_sector);
3388 return DM_MAPIO_REMAPPED;
3389 }
3390
3391 /*
3392 * Check if bio is too large, split as needed.
3393 */
3394 if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3395 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3396 dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3397
3398 /*
3399 * Ensure that bio is a multiple of internal sector encryption size
3400 * and is aligned to this size as defined in IO hints.
3401 */
3402 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3403 return DM_MAPIO_KILL;
3404
3405 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3406 return DM_MAPIO_KILL;
3407
3408 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3409 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3410
3411 if (cc->on_disk_tag_size) {
3412 unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3413
3414 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
3415 unlikely(!(io->integrity_metadata = kmalloc(tag_len,
3416 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
3417 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3418 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3419 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3420 io->integrity_metadata_from_pool = true;
3421 }
3422 }
3423
3424 if (crypt_integrity_aead(cc))
3425 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3426 else
3427 io->ctx.r.req = (struct skcipher_request *)(io + 1);
3428
3429 if (bio_data_dir(io->base_bio) == READ) {
3430 if (kcryptd_io_read(io, GFP_NOWAIT))
3431 kcryptd_queue_read(io);
3432 } else
3433 kcryptd_queue_crypt(io);
3434
3435 return DM_MAPIO_SUBMITTED;
3436}
3437
3438static void crypt_status(struct dm_target *ti, status_type_t type,
3439 unsigned status_flags, char *result, unsigned maxlen)
3440{
3441 struct crypt_config *cc = ti->private;
3442 unsigned i, sz = 0;
3443 int num_feature_args = 0;
3444
3445 switch (type) {
3446 case STATUSTYPE_INFO:
3447 result[0] = '\0';
3448 break;
3449
3450 case STATUSTYPE_TABLE:
3451 DMEMIT("%s ", cc->cipher_string);
3452
3453 if (cc->key_size > 0) {
3454 if (cc->key_string)
3455 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3456 else
3457 for (i = 0; i < cc->key_size; i++)
3458 DMEMIT("%02x", cc->key[i]);
3459 } else
3460 DMEMIT("-");
3461
3462 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3463 cc->dev->name, (unsigned long long)cc->start);
3464
3465 num_feature_args += !!ti->num_discard_bios;
3466 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3467 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3468 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3469 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3470 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3471 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3472 if (cc->on_disk_tag_size)
3473 num_feature_args++;
3474 if (num_feature_args) {
3475 DMEMIT(" %d", num_feature_args);
3476 if (ti->num_discard_bios)
3477 DMEMIT(" allow_discards");
3478 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3479 DMEMIT(" same_cpu_crypt");
3480 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3481 DMEMIT(" submit_from_crypt_cpus");
3482 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3483 DMEMIT(" no_read_workqueue");
3484 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3485 DMEMIT(" no_write_workqueue");
3486 if (cc->on_disk_tag_size)
3487 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3488 if (cc->sector_size != (1 << SECTOR_SHIFT))
3489 DMEMIT(" sector_size:%d", cc->sector_size);
3490 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3491 DMEMIT(" iv_large_sectors");
3492 }
3493 break;
3494
3495 case STATUSTYPE_IMA:
3496 DMEMIT_TARGET_NAME_VERSION(ti->type);
3497 DMEMIT(",allow_discards=%c", ti->num_discard_bios ? 'y' : 'n');
3498 DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
3499 DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
3500 'y' : 'n');
3501 DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
3502 'y' : 'n');
3503 DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
3504 'y' : 'n');
3505 DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
3506 'y' : 'n');
3507
3508 if (cc->on_disk_tag_size)
3509 DMEMIT(",integrity_tag_size=%u,cipher_auth=%s",
3510 cc->on_disk_tag_size, cc->cipher_auth);
3511 if (cc->sector_size != (1 << SECTOR_SHIFT))
3512 DMEMIT(",sector_size=%d", cc->sector_size);
3513 if (cc->cipher_string)
3514 DMEMIT(",cipher_string=%s", cc->cipher_string);
3515
3516 DMEMIT(",key_size=%u", cc->key_size);
3517 DMEMIT(",key_parts=%u", cc->key_parts);
3518 DMEMIT(",key_extra_size=%u", cc->key_extra_size);
3519 DMEMIT(",key_mac_size=%u", cc->key_mac_size);
3520 DMEMIT(";");
3521 break;
3522 }
3523}
3524
3525static void crypt_postsuspend(struct dm_target *ti)
3526{
3527 struct crypt_config *cc = ti->private;
3528
3529 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3530}
3531
3532static int crypt_preresume(struct dm_target *ti)
3533{
3534 struct crypt_config *cc = ti->private;
3535
3536 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3537 DMERR("aborting resume - crypt key is not set.");
3538 return -EAGAIN;
3539 }
3540
3541 return 0;
3542}
3543
3544static void crypt_resume(struct dm_target *ti)
3545{
3546 struct crypt_config *cc = ti->private;
3547
3548 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3549}
3550
3551 /* Message interface
3552 * key set <key>
3553 * key wipe
3554 */
3555static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
3556 char *result, unsigned maxlen)
3557{
3558 struct crypt_config *cc = ti->private;
3559 int key_size, ret = -EINVAL;
3560
3561 if (argc < 2)
3562 goto error;
3563
3564 if (!strcasecmp(argv[0], "key")) {
3565 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3566 DMWARN("not suspended during key manipulation.");
3567 return -EINVAL;
3568 }
3569 if (argc == 3 && !strcasecmp(argv[1], "set")) {
3570 /* The key size may not be changed. */
3571 key_size = get_key_size(&argv[2]);
3572 if (key_size < 0 || cc->key_size != key_size) {
3573 memset(argv[2], '0', strlen(argv[2]));
3574 return -EINVAL;
3575 }
3576
3577 ret = crypt_set_key(cc, argv[2]);
3578 if (ret)
3579 return ret;
3580 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3581 ret = cc->iv_gen_ops->init(cc);
3582 /* wipe the kernel key payload copy */
3583 if (cc->key_string)
3584 memset(cc->key, 0, cc->key_size * sizeof(u8));
3585 return ret;
3586 }
3587 if (argc == 2 && !strcasecmp(argv[1], "wipe"))
3588 return crypt_wipe_key(cc);
3589 }
3590
3591error:
3592 DMWARN("unrecognised message received.");
3593 return -EINVAL;
3594}
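/*
 * Example message usage; the device must be suspended and names are
 * hypothetical:
 *
 *   dmsetup suspend encrypted_disk
 *   dmsetup message encrypted_disk 0 key wipe
 *   dmsetup message encrypted_disk 0 key set <new hex key>
 *   dmsetup resume encrypted_disk
 */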
3595
3596static int crypt_iterate_devices(struct dm_target *ti,
3597 iterate_devices_callout_fn fn, void *data)
3598{
3599 struct crypt_config *cc = ti->private;
3600
3601 return fn(ti, cc->dev, cc->start, ti->len, data);
3602}
3603
3604static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3605{
3606 struct crypt_config *cc = ti->private;
3607
3608 /*
3609 * Unfortunate constraint that is required to avoid the potential
3610 * for exceeding underlying device's max_segments limits -- due to
3611 * crypt_alloc_buffer() possibly allocating pages for the encryption
3612 * bio that are not as physically contiguous as the original bio.
3613 */
3614 limits->max_segment_size = PAGE_SIZE;
3615
3616 limits->logical_block_size =
3617 max_t(unsigned, limits->logical_block_size, cc->sector_size);
3618 limits->physical_block_size =
3619 max_t(unsigned, limits->physical_block_size, cc->sector_size);
3620 limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
3621}
3622
3623static struct target_type crypt_target = {
3624 .name = "crypt",
3625 .version = {1, 23, 0},
3626 .module = THIS_MODULE,
3627 .ctr = crypt_ctr,
3628 .dtr = crypt_dtr,
3629 .features = DM_TARGET_ZONED_HM,
3630 .report_zones = crypt_report_zones,
3631 .map = crypt_map,
3632 .status = crypt_status,
3633 .postsuspend = crypt_postsuspend,
3634 .preresume = crypt_preresume,
3635 .resume = crypt_resume,
3636 .message = crypt_message,
3637 .iterate_devices = crypt_iterate_devices,
3638 .io_hints = crypt_io_hints,
3639};
3640
3641static int __init dm_crypt_init(void)
3642{
3643 int r;
3644
3645 r = dm_register_target(&crypt_target);
3646 if (r < 0)
3647 DMERR("register failed %d", r);
3648
3649 return r;
3650}
3651
3652static void __exit dm_crypt_exit(void)
3653{
3654 dm_unregister_target(&crypt_target);
3655}
3656
3657module_init(dm_crypt_init);
3658module_exit(dm_crypt_exit);
3659
3660MODULE_AUTHOR("Jana Saout <jana@saout.de>");
3661MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
3662MODULE_LICENSE("GPL");
3663