/*
 * dm-crypt: device-mapper target for transparent encryption / decryption
 * (see MODULE_DESCRIPTION below).
 *
 * Copyright (C) Christophe Saout <christophe@saout.de>
 *
 * This file is released under the GPL.
 */
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/bio.h>
16#include <linux/blkdev.h>
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/crypto.h>
20#include <linux/workqueue.h>
21#include <linux/backing-dev.h>
22#include <linux/percpu.h>
23#include <linux/atomic.h>
24#include <linux/scatterlist.h>
25#include <asm/page.h>
26#include <asm/unaligned.h>
27#include <crypto/hash.h>
28#include <crypto/md5.h>
29#include <crypto/algapi.h>
30
31#include <linux/device-mapper.h>
32
33#define DM_MSG_PREFIX "crypt"
34
35
36
37
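/*
 * context holding the current state of a multi-part conversion
 */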
38struct convert_context {
39 struct completion restart;
40 struct bio *bio_in;
41 struct bio *bio_out;
42 struct bvec_iter iter_in;
43 struct bvec_iter iter_out;
44 sector_t cc_sector;
45 atomic_t cc_pending;
46};
47
48
49
50
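/*
 * per bio private data
 */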
51struct dm_crypt_io {
52 struct crypt_config *cc;
53 struct bio *base_bio;
54 struct work_struct work;
55
56 struct convert_context ctx;
57
58 atomic_t io_pending;
59 int error;
60 sector_t sector;
61 struct dm_crypt_io *base_io;
62};
63
64struct dm_crypt_request {
65 struct convert_context *ctx;
66 struct scatterlist sg_in;
67 struct scatterlist sg_out;
68 sector_t iv_sector;
69};
70
71struct crypt_config;
72
73struct crypt_iv_operations {
74 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
75 const char *opts);
76 void (*dtr)(struct crypt_config *cc);
77 int (*init)(struct crypt_config *cc);
78 int (*wipe)(struct crypt_config *cc);
79 int (*generator)(struct crypt_config *cc, u8 *iv,
80 struct dm_crypt_request *dmreq);
81 int (*post)(struct crypt_config *cc, u8 *iv,
82 struct dm_crypt_request *dmreq);
83};
84
85struct iv_essiv_private {
86 struct crypto_hash *hash_tfm;
87 u8 *salt;
88};
89
90struct iv_benbi_private {
91 int shift;
92};
93
94#define LMK_SEED_SIZE 64
95struct iv_lmk_private {
96 struct crypto_shash *hash_tfm;
97 u8 *seed;
98};
99
100#define TCW_WHITENING_SIZE 16
101struct iv_tcw_private {
102 struct crypto_shash *crc32_tfm;
103 u8 *iv_seed;
104 u8 *whitening;
105};
106
107
108
109
110
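/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */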
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
112
113
114
115
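/*
 * Duplicated per-CPU state for cipher.
 */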
116struct crypt_cpu {
117 struct ablkcipher_request *req;
118};
119
120
121
122
123
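/*
 * The fields in here must be read only after initialization.
 */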
124struct crypt_config {
125 struct dm_dev *dev;
126 sector_t start;
127
128
129
130
131
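	/*
	 * pool for per bio private data, crypto requests and
	 * encryption buffer pages
	 */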
132 mempool_t *io_pool;
133 mempool_t *req_pool;
134 mempool_t *page_pool;
135 struct bio_set *bs;
136
137 struct workqueue_struct *io_queue;
138 struct workqueue_struct *crypt_queue;
139
140 char *cipher;
141 char *cipher_string;
142
143 struct crypt_iv_operations *iv_gen_ops;
144 union {
145 struct iv_essiv_private essiv;
146 struct iv_benbi_private benbi;
147 struct iv_lmk_private lmk;
148 struct iv_tcw_private tcw;
149 } iv_gen_private;
150 sector_t iv_offset;
151 unsigned int iv_size;
152
153
154
155
156
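	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */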
157 struct crypt_cpu __percpu *cpu;
158
159
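	/* ESSIV: struct crypto_cipher *essiv_tfm */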
160 void *iv_private;
161 struct crypto_ablkcipher **tfms;
162 unsigned tfms_count;
163
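	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */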
177 unsigned int dmreq_start;
178
179 unsigned long flags;
180 unsigned int key_size;
181 unsigned int key_parts;
182 unsigned int key_extra_size;
183 u8 key[0];
184};
185
186#define MIN_IOS 16
187#define MIN_POOL_PAGES 32
188
189static struct kmem_cache *_crypt_io_pool;
190
191static void clone_init(struct dm_crypt_io *, struct bio *);
192static void kcryptd_queue_crypt(struct dm_crypt_io *io);
193static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
194
195static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
196{
197 return this_cpu_ptr(cc->cpu);
198}
199
200
201
202
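/*
 * Use this to access cipher attributes that are the same for each CPU.
 */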
203static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
204{
205 return cc->tfms[0];
206}
207
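/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes).
 *
 * null: the initial vector is always zero.
 *
 * lmk: compatible implementation of the block chaining mode used by the
 *      Loop-AES block device encryption system; the IV is derived from an
 *      MD5 hash of the sector number, the sector data and an optional seed.
 *
 * tcw: compatible implementation of the block chaining mode used by the
 *      TrueCrypt device encryption system (prior to version 4.1); the IV is
 *      derived from an IV seed and the sector number, and additional
 *      whitening is applied to the whole sector.
 */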
255static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
256 struct dm_crypt_request *dmreq)
257{
258 memset(iv, 0, cc->iv_size);
259 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
260
261 return 0;
262}
263
264static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
265 struct dm_crypt_request *dmreq)
266{
267 memset(iv, 0, cc->iv_size);
268 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
269
270 return 0;
271}
272
273
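/* Initialise ESSIV - compute salt but no local memory allocations */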
274static int crypt_iv_essiv_init(struct crypt_config *cc)
275{
276 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
277 struct hash_desc desc;
278 struct scatterlist sg;
279 struct crypto_cipher *essiv_tfm;
280 int err;
281
282 sg_init_one(&sg, cc->key, cc->key_size);
283 desc.tfm = essiv->hash_tfm;
284 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
285
286 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
287 if (err)
288 return err;
289
290 essiv_tfm = cc->iv_private;
291
292 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
293 crypto_hash_digestsize(essiv->hash_tfm));
294 if (err)
295 return err;
296
297 return 0;
298}
299
300
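/* Wipe salt and reset key derived from volume key */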
301static int crypt_iv_essiv_wipe(struct crypt_config *cc)
302{
303 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
304 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
305 struct crypto_cipher *essiv_tfm;
306 int r, err = 0;
307
308 memset(essiv->salt, 0, salt_size);
309
310 essiv_tfm = cc->iv_private;
311 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
312 if (r)
313 err = r;
314
315 return err;
316}
317
318
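/* Allocate the ESSIV cipher and key it with the hashed salt */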
319static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
320 struct dm_target *ti,
321 u8 *salt, unsigned saltsize)
322{
323 struct crypto_cipher *essiv_tfm;
324 int err;
325
326
327 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
328 if (IS_ERR(essiv_tfm)) {
329 ti->error = "Error allocating crypto tfm for ESSIV";
330 return essiv_tfm;
331 }
332
333 if (crypto_cipher_blocksize(essiv_tfm) !=
334 crypto_ablkcipher_ivsize(any_tfm(cc))) {
335 ti->error = "Block size of ESSIV cipher does "
336 "not match IV size of block cipher";
337 crypto_free_cipher(essiv_tfm);
338 return ERR_PTR(-EINVAL);
339 }
340
341 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
342 if (err) {
343 ti->error = "Failed to set key for ESSIV cipher";
344 crypto_free_cipher(essiv_tfm);
345 return ERR_PTR(err);
346 }
347
348 return essiv_tfm;
349}
350
351static void crypt_iv_essiv_dtr(struct crypt_config *cc)
352{
353 struct crypto_cipher *essiv_tfm;
354 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
355
356 crypto_free_hash(essiv->hash_tfm);
357 essiv->hash_tfm = NULL;
358
359 kzfree(essiv->salt);
360 essiv->salt = NULL;
361
362 essiv_tfm = cc->iv_private;
363
364 if (essiv_tfm)
365 crypto_free_cipher(essiv_tfm);
366
367 cc->iv_private = NULL;
368}
369
370static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
371 const char *opts)
372{
373 struct crypto_cipher *essiv_tfm = NULL;
374 struct crypto_hash *hash_tfm = NULL;
375 u8 *salt = NULL;
376 int err;
377
378 if (!opts) {
379 ti->error = "Digest algorithm missing for ESSIV mode";
380 return -EINVAL;
381 }
382
383
384 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
385 if (IS_ERR(hash_tfm)) {
386 ti->error = "Error initializing ESSIV hash";
387 err = PTR_ERR(hash_tfm);
388 goto bad;
389 }
390
391 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
392 if (!salt) {
393 ti->error = "Error kmallocing salt storage in ESSIV";
394 err = -ENOMEM;
395 goto bad;
396 }
397
398 cc->iv_gen_private.essiv.salt = salt;
399 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
400
401 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
402 crypto_hash_digestsize(hash_tfm));
403 if (IS_ERR(essiv_tfm)) {
404 crypt_iv_essiv_dtr(cc);
405 return PTR_ERR(essiv_tfm);
406 }
407 cc->iv_private = essiv_tfm;
408
409 return 0;
410
411bad:
412 if (hash_tfm && !IS_ERR(hash_tfm))
413 crypto_free_hash(hash_tfm);
414 kfree(salt);
415 return err;
416}
417
418static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
419 struct dm_crypt_request *dmreq)
420{
421 struct crypto_cipher *essiv_tfm = cc->iv_private;
422
423 memset(iv, 0, cc->iv_size);
424 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
425 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
426
427 return 0;
428}
429
430static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
431 const char *opts)
432{
433 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
434 int log = ilog2(bs);
435
436
437
438
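	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen.
	 */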
439 if (1 << log != bs) {
440 ti->error = "cypher blocksize is not a power of 2";
441 return -EINVAL;
442 }
443
444 if (log > 9) {
445 ti->error = "cypher blocksize is > 512";
446 return -EINVAL;
447 }
448
449 cc->iv_gen_private.benbi.shift = 9 - log;
450
451 return 0;
452}
453
454static void crypt_iv_benbi_dtr(struct crypt_config *cc)
455{
456}
457
458static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
459 struct dm_crypt_request *dmreq)
460{
461 __be64 val;
462
463 memset(iv, 0, cc->iv_size - sizeof(u64));
464
465 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
466 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
467
468 return 0;
469}
470
471static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
472 struct dm_crypt_request *dmreq)
473{
474 memset(iv, 0, cc->iv_size);
475
476 return 0;
477}
478
479static void crypt_iv_lmk_dtr(struct crypt_config *cc)
480{
481 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
482
483 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
484 crypto_free_shash(lmk->hash_tfm);
485 lmk->hash_tfm = NULL;
486
487 kzfree(lmk->seed);
488 lmk->seed = NULL;
489}
490
491static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
492 const char *opts)
493{
494 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
495
496 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
497 if (IS_ERR(lmk->hash_tfm)) {
498 ti->error = "Error initializing LMK hash";
499 return PTR_ERR(lmk->hash_tfm);
500 }
501
502
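	/* No seed in LMK version 2 */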
503 if (cc->key_parts == cc->tfms_count) {
504 lmk->seed = NULL;
505 return 0;
506 }
507
508 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
509 if (!lmk->seed) {
510 crypt_iv_lmk_dtr(cc);
511 ti->error = "Error kmallocing seed storage in LMK";
512 return -ENOMEM;
513 }
514
515 return 0;
516}
517
518static int crypt_iv_lmk_init(struct crypt_config *cc)
519{
520 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
521 int subkey_size = cc->key_size / cc->key_parts;
522
523
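	/* LMK seed is on the position of LMK_KEYS + 1 key */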
524 if (lmk->seed)
525 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
526 crypto_shash_digestsize(lmk->hash_tfm));
527
528 return 0;
529}
530
531static int crypt_iv_lmk_wipe(struct crypt_config *cc)
532{
533 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
534
535 if (lmk->seed)
536 memset(lmk->seed, 0, LMK_SEED_SIZE);
537
538 return 0;
539}
540
541static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
542 struct dm_crypt_request *dmreq,
543 u8 *data)
544{
545 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
546 struct {
547 struct shash_desc desc;
548 char ctx[crypto_shash_descsize(lmk->hash_tfm)];
549 } sdesc;
550 struct md5_state md5state;
551 __le32 buf[4];
552 int i, r;
553
554 sdesc.desc.tfm = lmk->hash_tfm;
555 sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
556
557 r = crypto_shash_init(&sdesc.desc);
558 if (r)
559 return r;
560
561 if (lmk->seed) {
562 r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
563 if (r)
564 return r;
565 }
566
567
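	/* Sector is always 512B, block size 16, add data of blocks 1-31 */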
568 r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
569 if (r)
570 return r;
571
572
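	/* Sector is cropped to 56 bits here */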
573 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
574 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
575 buf[2] = cpu_to_le32(4024);
576 buf[3] = 0;
577 r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
578 if (r)
579 return r;
580
581
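	/* No MD5 padding here */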
582 r = crypto_shash_export(&sdesc.desc, &md5state);
583 if (r)
584 return r;
585
586 for (i = 0; i < MD5_HASH_WORDS; i++)
587 __cpu_to_le32s(&md5state.hash[i]);
588 memcpy(iv, &md5state.hash, cc->iv_size);
589
590 return 0;
591}
592
593static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
594 struct dm_crypt_request *dmreq)
595{
596 u8 *src;
597 int r = 0;
598
599 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
600 src = kmap_atomic(sg_page(&dmreq->sg_in));
601 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
602 kunmap_atomic(src);
603 } else
604 memset(iv, 0, cc->iv_size);
605
606 return r;
607}
608
609static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
610 struct dm_crypt_request *dmreq)
611{
612 u8 *dst;
613 int r;
614
615 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
616 return 0;
617
618 dst = kmap_atomic(sg_page(&dmreq->sg_out));
619 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
620
621
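	/* Tweak the first block of plaintext sector */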
622 if (!r)
623 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
624
625 kunmap_atomic(dst);
626 return r;
627}
628
629static void crypt_iv_tcw_dtr(struct crypt_config *cc)
630{
631 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
632
633 kzfree(tcw->iv_seed);
634 tcw->iv_seed = NULL;
635 kzfree(tcw->whitening);
636 tcw->whitening = NULL;
637
638 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
639 crypto_free_shash(tcw->crc32_tfm);
640 tcw->crc32_tfm = NULL;
641}
642
643static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
644 const char *opts)
645{
646 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
647
648 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
649 ti->error = "Wrong key size for TCW";
650 return -EINVAL;
651 }
652
653 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
654 if (IS_ERR(tcw->crc32_tfm)) {
655 ti->error = "Error initializing CRC32 in TCW";
656 return PTR_ERR(tcw->crc32_tfm);
657 }
658
659 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
660 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
661 if (!tcw->iv_seed || !tcw->whitening) {
662 crypt_iv_tcw_dtr(cc);
663 ti->error = "Error allocating seed storage in TCW";
664 return -ENOMEM;
665 }
666
667 return 0;
668}
669
670static int crypt_iv_tcw_init(struct crypt_config *cc)
671{
672 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
673 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
674
675 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
676 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
677 TCW_WHITENING_SIZE);
678
679 return 0;
680}
681
682static int crypt_iv_tcw_wipe(struct crypt_config *cc)
683{
684 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
685
686 memset(tcw->iv_seed, 0, cc->iv_size);
687 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
688
689 return 0;
690}
691
692static int crypt_iv_tcw_whitening(struct crypt_config *cc,
693 struct dm_crypt_request *dmreq,
694 u8 *data)
695{
696 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
697 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
698 u8 buf[TCW_WHITENING_SIZE];
699 struct {
700 struct shash_desc desc;
701 char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
702 } sdesc;
703 int i, r;
704
705
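	/* xor whitening with sector number */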
706 memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);
709
710
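	/* calculate crc32 for every 32bit part and xor it */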
711 sdesc.desc.tfm = tcw->crc32_tfm;
712 sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
713 for (i = 0; i < 4; i++) {
714 r = crypto_shash_init(&sdesc.desc);
715 if (r)
716 goto out;
717 r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
718 if (r)
719 goto out;
720 r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
721 if (r)
722 goto out;
723 }
724 crypto_xor(&buf[0], &buf[12], 4);
725 crypto_xor(&buf[4], &buf[8], 4);
726
727
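	/* apply whitening (8 bytes) to whole sector */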
728 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
729 crypto_xor(data + i * 8, buf, 8);
730out:
731 memset(buf, 0, sizeof(buf));
732 return r;
733}
734
735static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
736 struct dm_crypt_request *dmreq)
737{
738 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
739 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
740 u8 *src;
741 int r = 0;
742
743
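	/* Remove whitening from ciphertext */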
744 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
745 src = kmap_atomic(sg_page(&dmreq->sg_in));
746 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
747 kunmap_atomic(src);
748 }
749
750
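	/* Calculate IV */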
751 memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
755
756 return r;
757}
758
759static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
760 struct dm_crypt_request *dmreq)
761{
762 u8 *dst;
763 int r;
764
765 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
766 return 0;
767
768
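	/* Apply whitening on ciphertext */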
769 dst = kmap_atomic(sg_page(&dmreq->sg_out));
770 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
771 kunmap_atomic(dst);
772
773 return r;
774}
775
776static struct crypt_iv_operations crypt_iv_plain_ops = {
777 .generator = crypt_iv_plain_gen
778};
779
780static struct crypt_iv_operations crypt_iv_plain64_ops = {
781 .generator = crypt_iv_plain64_gen
782};
783
784static struct crypt_iv_operations crypt_iv_essiv_ops = {
785 .ctr = crypt_iv_essiv_ctr,
786 .dtr = crypt_iv_essiv_dtr,
787 .init = crypt_iv_essiv_init,
788 .wipe = crypt_iv_essiv_wipe,
789 .generator = crypt_iv_essiv_gen
790};
791
792static struct crypt_iv_operations crypt_iv_benbi_ops = {
793 .ctr = crypt_iv_benbi_ctr,
794 .dtr = crypt_iv_benbi_dtr,
795 .generator = crypt_iv_benbi_gen
796};
797
798static struct crypt_iv_operations crypt_iv_null_ops = {
799 .generator = crypt_iv_null_gen
800};
801
802static struct crypt_iv_operations crypt_iv_lmk_ops = {
803 .ctr = crypt_iv_lmk_ctr,
804 .dtr = crypt_iv_lmk_dtr,
805 .init = crypt_iv_lmk_init,
806 .wipe = crypt_iv_lmk_wipe,
807 .generator = crypt_iv_lmk_gen,
808 .post = crypt_iv_lmk_post
809};
810
811static struct crypt_iv_operations crypt_iv_tcw_ops = {
812 .ctr = crypt_iv_tcw_ctr,
813 .dtr = crypt_iv_tcw_dtr,
814 .init = crypt_iv_tcw_init,
815 .wipe = crypt_iv_tcw_wipe,
816 .generator = crypt_iv_tcw_gen,
817 .post = crypt_iv_tcw_post
818};
819
820static void crypt_convert_init(struct crypt_config *cc,
821 struct convert_context *ctx,
822 struct bio *bio_out, struct bio *bio_in,
823 sector_t sector)
824{
825 ctx->bio_in = bio_in;
826 ctx->bio_out = bio_out;
827 if (bio_in)
828 ctx->iter_in = bio_in->bi_iter;
829 if (bio_out)
830 ctx->iter_out = bio_out->bi_iter;
831 ctx->cc_sector = sector + cc->iv_offset;
832 init_completion(&ctx->restart);
833}
834
835static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
836 struct ablkcipher_request *req)
837{
838 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
839}
840
841static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
842 struct dm_crypt_request *dmreq)
843{
844 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
845}
846
847static u8 *iv_of_dmreq(struct crypt_config *cc,
848 struct dm_crypt_request *dmreq)
849{
850 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
851 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
852}
853
854static int crypt_convert_block(struct crypt_config *cc,
855 struct convert_context *ctx,
856 struct ablkcipher_request *req)
857{
858 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
859 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
860 struct dm_crypt_request *dmreq;
861 u8 *iv;
862 int r;
863
864 dmreq = dmreq_of_req(cc, req);
865 iv = iv_of_dmreq(cc, dmreq);
866
867 dmreq->iv_sector = ctx->cc_sector;
868 dmreq->ctx = ctx;
869 sg_init_table(&dmreq->sg_in, 1);
870 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
871 bv_in.bv_offset);
872
873 sg_init_table(&dmreq->sg_out, 1);
874 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
875 bv_out.bv_offset);
876
877 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
878 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
879
880 if (cc->iv_gen_ops) {
881 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
882 if (r < 0)
883 return r;
884 }
885
886 ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
887 1 << SECTOR_SHIFT, iv);
888
889 if (bio_data_dir(ctx->bio_in) == WRITE)
890 r = crypto_ablkcipher_encrypt(req);
891 else
892 r = crypto_ablkcipher_decrypt(req);
893
894 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
895 r = cc->iv_gen_ops->post(cc, iv, dmreq);
896
897 return r;
898}
899
900static void kcryptd_async_done(struct crypto_async_request *async_req,
901 int error);
902
903static void crypt_alloc_req(struct crypt_config *cc,
904 struct convert_context *ctx)
905{
906 struct crypt_cpu *this_cc = this_crypt_config(cc);
907 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
908
909 if (!this_cc->req)
910 this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
911
912 ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
913 ablkcipher_request_set_callback(this_cc->req,
914 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
915 kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
916}
917
918
919
920
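/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */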
921static int crypt_convert(struct crypt_config *cc,
922 struct convert_context *ctx)
923{
924 struct crypt_cpu *this_cc = this_crypt_config(cc);
925 int r;
926
927 atomic_set(&ctx->cc_pending, 1);
928
929 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
930
931 crypt_alloc_req(cc, ctx);
932
933 atomic_inc(&ctx->cc_pending);
934
935 r = crypt_convert_block(cc, ctx, this_cc->req);
936
937 switch (r) {
938
939 case -EBUSY:
940 wait_for_completion(&ctx->restart);
941 reinit_completion(&ctx->restart);
942
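		/* fall through */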
943 case -EINPROGRESS:
944 this_cc->req = NULL;
945 ctx->cc_sector++;
946 continue;
947
948
949 case 0:
950 atomic_dec(&ctx->cc_pending);
951 ctx->cc_sector++;
952 cond_resched();
953 continue;
954
955
956 default:
957 atomic_dec(&ctx->cc_pending);
958 return r;
959 }
960 }
961
962 return 0;
963}
964
965
966
967
968
969
970
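/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */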
971static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
972 unsigned *out_of_pages)
973{
974 struct crypt_config *cc = io->cc;
975 struct bio *clone;
976 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
977 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
978 unsigned i, len;
979 struct page *page;
980
981 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
982 if (!clone)
983 return NULL;
984
985 clone_init(io, clone);
986 *out_of_pages = 0;
987
988 for (i = 0; i < nr_iovecs; i++) {
989 page = mempool_alloc(cc->page_pool, gfp_mask);
990 if (!page) {
991 *out_of_pages = 1;
992 break;
993 }
994
995
996
997
998
999
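		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio. The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */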
1000 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
1001
1002 len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
1003
1004 if (!bio_add_page(clone, page, len, 0)) {
1005 mempool_free(page, cc->page_pool);
1006 break;
1007 }
1008
1009 size -= len;
1010 }
1011
1012 if (!clone->bi_iter.bi_size) {
1013 bio_put(clone);
1014 return NULL;
1015 }
1016
1017 return clone;
1018}
1019
1020static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1021{
1022 unsigned int i;
1023 struct bio_vec *bv;
1024
1025 bio_for_each_segment_all(bv, clone, i) {
1026 BUG_ON(!bv->bv_page);
1027 mempool_free(bv->bv_page, cc->page_pool);
1028 bv->bv_page = NULL;
1029 }
1030}
1031
1032static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
1033 struct bio *bio, sector_t sector)
1034{
1035 struct dm_crypt_io *io;
1036
1037 io = mempool_alloc(cc->io_pool, GFP_NOIO);
1038 io->cc = cc;
1039 io->base_bio = bio;
1040 io->sector = sector;
1041 io->error = 0;
1042 io->base_io = NULL;
1043 atomic_set(&io->io_pending, 0);
1044
1045 return io;
1046}
1047
1048static void crypt_inc_pending(struct dm_crypt_io *io)
1049{
1050 atomic_inc(&io->io_pending);
1051}
1052
1053
1054
1055
1056
1057
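/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */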
1058static void crypt_dec_pending(struct dm_crypt_io *io)
1059{
1060 struct crypt_config *cc = io->cc;
1061 struct bio *base_bio = io->base_bio;
1062 struct dm_crypt_io *base_io = io->base_io;
1063 int error = io->error;
1064
1065 if (!atomic_dec_and_test(&io->io_pending))
1066 return;
1067
1068 mempool_free(io, cc->io_pool);
1069
1070 if (likely(!base_io))
1071 bio_endio(base_bio, error);
1072 else {
1073 if (error && !base_io->error)
1074 base_io->error = error;
1075 crypt_dec_pending(base_io);
1076 }
1077}
1078
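/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */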
1096static void crypt_endio(struct bio *clone, int error)
1097{
1098 struct dm_crypt_io *io = clone->bi_private;
1099 struct crypt_config *cc = io->cc;
1100 unsigned rw = bio_data_dir(clone);
1101
1102 if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
1103 error = -EIO;
1104
1105
1106
1107
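	/*
	 * free the processed pages
	 */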
1108 if (rw == WRITE)
1109 crypt_free_buffer_pages(cc, clone);
1110
1111 bio_put(clone);
1112
1113 if (rw == READ && !error) {
1114 kcryptd_queue_crypt(io);
1115 return;
1116 }
1117
1118 if (unlikely(error))
1119 io->error = error;
1120
1121 crypt_dec_pending(io);
1122}
1123
1124static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1125{
1126 struct crypt_config *cc = io->cc;
1127
1128 clone->bi_private = io;
1129 clone->bi_end_io = crypt_endio;
1130 clone->bi_bdev = cc->dev->bdev;
1131 clone->bi_rw = io->base_bio->bi_rw;
1132}
1133
1134static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1135{
1136 struct crypt_config *cc = io->cc;
1137 struct bio *base_bio = io->base_bio;
1138 struct bio *clone;
1139
1140
1141
1142
1143
1144
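	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */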
1145 clone = bio_clone_bioset(base_bio, gfp, cc->bs);
1146 if (!clone)
1147 return 1;
1148
1149 crypt_inc_pending(io);
1150
1151 clone_init(io, clone);
1152 clone->bi_iter.bi_sector = cc->start + io->sector;
1153
1154 generic_make_request(clone);
1155 return 0;
1156}
1157
1158static void kcryptd_io_write(struct dm_crypt_io *io)
1159{
1160 struct bio *clone = io->ctx.bio_out;
1161 generic_make_request(clone);
1162}
1163
1164static void kcryptd_io(struct work_struct *work)
1165{
1166 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1167
1168 if (bio_data_dir(io->base_bio) == READ) {
1169 crypt_inc_pending(io);
1170 if (kcryptd_io_read(io, GFP_NOIO))
1171 io->error = -ENOMEM;
1172 crypt_dec_pending(io);
1173 } else
1174 kcryptd_io_write(io);
1175}
1176
1177static void kcryptd_queue_io(struct dm_crypt_io *io)
1178{
1179 struct crypt_config *cc = io->cc;
1180
1181 INIT_WORK(&io->work, kcryptd_io);
1182 queue_work(cc->io_queue, &io->work);
1183}
1184
1185static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1186{
1187 struct bio *clone = io->ctx.bio_out;
1188 struct crypt_config *cc = io->cc;
1189
1190 if (unlikely(io->error < 0)) {
1191 crypt_free_buffer_pages(cc, clone);
1192 bio_put(clone);
1193 crypt_dec_pending(io);
1194 return;
1195 }
1196
1197
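	/* crypt_convert should have filled the clone bio */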
1198 BUG_ON(io->ctx.iter_out.bi_size);
1199
1200 clone->bi_iter.bi_sector = cc->start + io->sector;
1201
1202 if (async)
1203 kcryptd_queue_io(io);
1204 else
1205 generic_make_request(clone);
1206}
1207
1208static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1209{
1210 struct crypt_config *cc = io->cc;
1211 struct bio *clone;
1212 struct dm_crypt_io *new_io;
1213 int crypt_finished;
1214 unsigned out_of_pages = 0;
1215 unsigned remaining = io->base_bio->bi_iter.bi_size;
1216 sector_t sector = io->sector;
1217 int r;
1218
1219
1220
1221
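	/*
	 * Prevent io from disappearing from underneath us.
	 */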
1222 crypt_inc_pending(io);
1223 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1224
1225
1226
1227
1228
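	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */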
1229 while (remaining) {
1230 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
1231 if (unlikely(!clone)) {
1232 io->error = -ENOMEM;
1233 break;
1234 }
1235
1236 io->ctx.bio_out = clone;
1237 io->ctx.iter_out = clone->bi_iter;
1238
1239 remaining -= clone->bi_iter.bi_size;
1240 sector += bio_sectors(clone);
1241
1242 crypt_inc_pending(io);
1243
1244 r = crypt_convert(cc, &io->ctx);
1245 if (r < 0)
1246 io->error = -EIO;
1247
1248 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1249
1250
1251 if (crypt_finished) {
1252 kcryptd_crypt_write_io_submit(io, 0);
1253
1254
1255
1256
1257
1258 if (unlikely(r < 0))
1259 break;
1260
1261 io->sector = sector;
1262 }
1263
1264
1265
1266
1267
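		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */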
1268 if (unlikely(out_of_pages))
1269 congestion_wait(BLK_RW_ASYNC, HZ/100);
1270
1271
1272
1273
1274
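		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */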
1275 if (unlikely(!crypt_finished && remaining)) {
1276 new_io = crypt_io_alloc(io->cc, io->base_bio,
1277 sector);
1278 crypt_inc_pending(new_io);
1279 crypt_convert_init(cc, &new_io->ctx, NULL,
1280 io->base_bio, sector);
1281 new_io->ctx.iter_in = io->ctx.iter_in;
1282
1283
1284
1285
1286
1287 if (!io->base_io)
1288 new_io->base_io = io;
1289 else {
1290 new_io->base_io = io->base_io;
1291 crypt_inc_pending(io->base_io);
1292 crypt_dec_pending(io);
1293 }
1294
1295 io = new_io;
1296 }
1297 }
1298
1299 crypt_dec_pending(io);
1300}
1301
1302static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1303{
1304 crypt_dec_pending(io);
1305}
1306
1307static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1308{
1309 struct crypt_config *cc = io->cc;
1310 int r = 0;
1311
1312 crypt_inc_pending(io);
1313
1314 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1315 io->sector);
1316
1317 r = crypt_convert(cc, &io->ctx);
1318 if (r < 0)
1319 io->error = -EIO;
1320
1321 if (atomic_dec_and_test(&io->ctx.cc_pending))
1322 kcryptd_crypt_read_done(io);
1323
1324 crypt_dec_pending(io);
1325}
1326
1327static void kcryptd_async_done(struct crypto_async_request *async_req,
1328 int error)
1329{
1330 struct dm_crypt_request *dmreq = async_req->data;
1331 struct convert_context *ctx = dmreq->ctx;
1332 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1333 struct crypt_config *cc = io->cc;
1334
1335 if (error == -EINPROGRESS) {
1336 complete(&ctx->restart);
1337 return;
1338 }
1339
1340 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1341 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1342
1343 if (error < 0)
1344 io->error = -EIO;
1345
1346 mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
1347
1348 if (!atomic_dec_and_test(&ctx->cc_pending))
1349 return;
1350
1351 if (bio_data_dir(io->base_bio) == READ)
1352 kcryptd_crypt_read_done(io);
1353 else
1354 kcryptd_crypt_write_io_submit(io, 1);
1355}
1356
1357static void kcryptd_crypt(struct work_struct *work)
1358{
1359 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1360
1361 if (bio_data_dir(io->base_bio) == READ)
1362 kcryptd_crypt_read_convert(io);
1363 else
1364 kcryptd_crypt_write_convert(io);
1365}
1366
1367static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1368{
1369 struct crypt_config *cc = io->cc;
1370
1371 INIT_WORK(&io->work, kcryptd_crypt);
1372 queue_work(cc->crypt_queue, &io->work);
1373}
1374
1375
1376
1377
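/*
 * Decode key from its hex representation
 */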
1378static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1379{
1380 char buffer[3];
1381 unsigned int i;
1382
1383 buffer[2] = '\0';
1384
1385 for (i = 0; i < size; i++) {
1386 buffer[0] = *hex++;
1387 buffer[1] = *hex++;
1388
1389 if (kstrtou8(buffer, 16, &key[i]))
1390 return -EINVAL;
1391 }
1392
1393 if (*hex != '\0')
1394 return -EINVAL;
1395
1396 return 0;
1397}
1398
1399static void crypt_free_tfms(struct crypt_config *cc)
1400{
1401 unsigned i;
1402
1403 if (!cc->tfms)
1404 return;
1405
1406 for (i = 0; i < cc->tfms_count; i++)
1407 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1408 crypto_free_ablkcipher(cc->tfms[i]);
1409 cc->tfms[i] = NULL;
1410 }
1411
1412 kfree(cc->tfms);
1413 cc->tfms = NULL;
1414}
1415
1416static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1417{
1418 unsigned i;
1419 int err;
1420
1421 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1422 GFP_KERNEL);
1423 if (!cc->tfms)
1424 return -ENOMEM;
1425
1426 for (i = 0; i < cc->tfms_count; i++) {
1427 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1428 if (IS_ERR(cc->tfms[i])) {
1429 err = PTR_ERR(cc->tfms[i]);
1430 crypt_free_tfms(cc);
1431 return err;
1432 }
1433 }
1434
1435 return 0;
1436}
1437
1438static int crypt_setkey_allcpus(struct crypt_config *cc)
1439{
1440 unsigned subkey_size;
1441 int err = 0, i, r;
1442
1443
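	/* Ignore extra keys (which are used for IV etc) */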
1444 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1445
1446 for (i = 0; i < cc->tfms_count; i++) {
1447 r = crypto_ablkcipher_setkey(cc->tfms[i],
1448 cc->key + (i * subkey_size),
1449 subkey_size);
1450 if (r)
1451 err = r;
1452 }
1453
1454 return err;
1455}
1456
1457static int crypt_set_key(struct crypt_config *cc, char *key)
1458{
1459 int r = -EINVAL;
1460 int key_string_len = strlen(key);
1461
1462
1463 if (cc->key_size != (key_string_len >> 1))
1464 goto out;
1465
1466
1467 if (!cc->key_size && strcmp(key, "-"))
1468 goto out;
1469
1470 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
1471 goto out;
1472
1473 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1474
1475 r = crypt_setkey_allcpus(cc);
1476
1477out:
1478
1479 memset(key, '0', key_string_len);
1480
1481 return r;
1482}
1483
1484static int crypt_wipe_key(struct crypt_config *cc)
1485{
1486 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1487 memset(&cc->key, 0, cc->key_size * sizeof(u8));
1488
1489 return crypt_setkey_allcpus(cc);
1490}
1491
1492static void crypt_dtr(struct dm_target *ti)
1493{
1494 struct crypt_config *cc = ti->private;
1495 struct crypt_cpu *cpu_cc;
1496 int cpu;
1497
1498 ti->private = NULL;
1499
1500 if (!cc)
1501 return;
1502
1503 if (cc->io_queue)
1504 destroy_workqueue(cc->io_queue);
1505 if (cc->crypt_queue)
1506 destroy_workqueue(cc->crypt_queue);
1507
1508 if (cc->cpu)
1509 for_each_possible_cpu(cpu) {
1510 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1511 if (cpu_cc->req)
1512 mempool_free(cpu_cc->req, cc->req_pool);
1513 }
1514
1515 crypt_free_tfms(cc);
1516
1517 if (cc->bs)
1518 bioset_free(cc->bs);
1519
1520 if (cc->page_pool)
1521 mempool_destroy(cc->page_pool);
1522 if (cc->req_pool)
1523 mempool_destroy(cc->req_pool);
1524 if (cc->io_pool)
1525 mempool_destroy(cc->io_pool);
1526
1527 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1528 cc->iv_gen_ops->dtr(cc);
1529
1530 if (cc->dev)
1531 dm_put_device(ti, cc->dev);
1532
1533 if (cc->cpu)
1534 free_percpu(cc->cpu);
1535
1536 kzfree(cc->cipher);
1537 kzfree(cc->cipher_string);
1538
1539
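	/* Must zero key material before freeing */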
1540 kzfree(cc);
1541}
1542
1543static int crypt_ctr_cipher(struct dm_target *ti,
1544 char *cipher_in, char *key)
1545{
1546 struct crypt_config *cc = ti->private;
1547 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
1548 char *cipher_api = NULL;
1549 int ret = -EINVAL;
1550 char dummy;
1551
1552
1553 if (strchr(cipher_in, '(')) {
1554 ti->error = "Bad cipher specification";
1555 return -EINVAL;
1556 }
1557
1558 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1559 if (!cc->cipher_string)
1560 goto bad_mem;
1561
1562
1563
1564
1565
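	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */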
1566 tmp = cipher_in;
1567 keycount = strsep(&tmp, "-");
1568 cipher = strsep(&keycount, ":");
1569
1570 if (!keycount)
1571 cc->tfms_count = 1;
1572 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
1573 !is_power_of_2(cc->tfms_count)) {
1574 ti->error = "Bad cipher key count specification";
1575 return -EINVAL;
1576 }
1577 cc->key_parts = cc->tfms_count;
1578 cc->key_extra_size = 0;
1579
1580 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1581 if (!cc->cipher)
1582 goto bad_mem;
1583
1584 chainmode = strsep(&tmp, "-");
1585 ivopts = strsep(&tmp, "-");
1586 ivmode = strsep(&ivopts, ":");
1587
1588 if (tmp)
1589 DMWARN("Ignoring unexpected additional cipher options");
1590
1591 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
1592 __alignof__(struct crypt_cpu));
1593 if (!cc->cpu) {
1594 ti->error = "Cannot allocate per cpu state";
1595 goto bad_mem;
1596 }
1597
1598
1599
1600
1601
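	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */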
1602 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
1603 chainmode = "cbc";
1604 ivmode = "plain";
1605 }
1606
1607 if (strcmp(chainmode, "ecb") && !ivmode) {
1608 ti->error = "IV mechanism required";
1609 return -EINVAL;
1610 }
1611
1612 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1613 if (!cipher_api)
1614 goto bad_mem;
1615
1616 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1617 "%s(%s)", chainmode, cipher);
1618 if (ret < 0) {
1619 kfree(cipher_api);
1620 goto bad_mem;
1621 }
1622
1623
1624 ret = crypt_alloc_tfms(cc, cipher_api);
1625 if (ret < 0) {
1626 ti->error = "Error allocating crypto tfm";
1627 goto bad;
1628 }
1629
1630
1631 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
1632 if (cc->iv_size)
1633
1634 cc->iv_size = max(cc->iv_size,
1635 (unsigned int)(sizeof(u64) / sizeof(u8)));
1636 else if (ivmode) {
1637 DMWARN("Selected cipher does not support IVs");
1638 ivmode = NULL;
1639 }
1640
1641
1642 if (ivmode == NULL)
1643 cc->iv_gen_ops = NULL;
1644 else if (strcmp(ivmode, "plain") == 0)
1645 cc->iv_gen_ops = &crypt_iv_plain_ops;
1646 else if (strcmp(ivmode, "plain64") == 0)
1647 cc->iv_gen_ops = &crypt_iv_plain64_ops;
1648 else if (strcmp(ivmode, "essiv") == 0)
1649 cc->iv_gen_ops = &crypt_iv_essiv_ops;
1650 else if (strcmp(ivmode, "benbi") == 0)
1651 cc->iv_gen_ops = &crypt_iv_benbi_ops;
1652 else if (strcmp(ivmode, "null") == 0)
1653 cc->iv_gen_ops = &crypt_iv_null_ops;
1654 else if (strcmp(ivmode, "lmk") == 0) {
1655 cc->iv_gen_ops = &crypt_iv_lmk_ops;
1656
1657
1658
1659
1660
1661
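		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */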
1662 if (cc->key_size % cc->key_parts) {
1663 cc->key_parts++;
1664 cc->key_extra_size = cc->key_size / cc->key_parts;
1665 }
1666 } else if (strcmp(ivmode, "tcw") == 0) {
1667 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1668 cc->key_parts += 2;
1669 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
1670 } else {
1671 ret = -EINVAL;
1672 ti->error = "Invalid IV mode";
1673 goto bad;
1674 }
1675
1676
1677 ret = crypt_set_key(cc, key);
1678 if (ret < 0) {
1679 ti->error = "Error decoding and setting key";
1680 goto bad;
1681 }
1682
1683
1684 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1685 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1686 if (ret < 0) {
1687 ti->error = "Error creating IV";
1688 goto bad;
1689 }
1690 }
1691
1692
1693 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1694 ret = cc->iv_gen_ops->init(cc);
1695 if (ret < 0) {
1696 ti->error = "Error initialising IV";
1697 goto bad;
1698 }
1699 }
1700
1701 ret = 0;
1702bad:
1703 kfree(cipher_api);
1704 return ret;
1705
1706bad_mem:
1707 ti->error = "Cannot allocate cipher strings";
1708 return -ENOMEM;
1709}
1710
1711
1712
1713
1714
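/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */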
1715static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1716{
1717 struct crypt_config *cc;
1718 unsigned int key_size, opt_params;
1719 unsigned long long tmpll;
1720 int ret;
1721 struct dm_arg_set as;
1722 const char *opt_string;
1723 char dummy;
1724
1725 static struct dm_arg _args[] = {
1726 {0, 1, "Invalid number of feature args"},
1727 };
1728
1729 if (argc < 5) {
1730 ti->error = "Not enough arguments";
1731 return -EINVAL;
1732 }
1733
1734 key_size = strlen(argv[1]) >> 1;
1735
1736 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1737 if (!cc) {
1738 ti->error = "Cannot allocate encryption context";
1739 return -ENOMEM;
1740 }
1741 cc->key_size = key_size;
1742
1743 ti->private = cc;
1744 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1745 if (ret < 0)
1746 goto bad;
1747
1748 ret = -ENOMEM;
1749 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
1750 if (!cc->io_pool) {
1751 ti->error = "Cannot allocate crypt io mempool";
1752 goto bad;
1753 }
1754
1755 cc->dmreq_start = sizeof(struct ablkcipher_request);
1756 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1757 cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
1758 cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
1759 ~(crypto_tfm_ctx_alignment() - 1);
1760
1761 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1762 sizeof(struct dm_crypt_request) + cc->iv_size);
1763 if (!cc->req_pool) {
1764 ti->error = "Cannot allocate crypt request mempool";
1765 goto bad;
1766 }
1767
1768 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
1769 if (!cc->page_pool) {
1770 ti->error = "Cannot allocate page mempool";
1771 goto bad;
1772 }
1773
1774 cc->bs = bioset_create(MIN_IOS, 0);
1775 if (!cc->bs) {
1776 ti->error = "Cannot allocate crypt bioset";
1777 goto bad;
1778 }
1779
1780 ret = -EINVAL;
1781 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1782 ti->error = "Invalid iv_offset sector";
1783 goto bad;
1784 }
1785 cc->iv_offset = tmpll;
1786
1787 if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
1788 ti->error = "Device lookup failed";
1789 goto bad;
1790 }
1791
1792 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1793 ti->error = "Invalid device sector";
1794 goto bad;
1795 }
1796 cc->start = tmpll;
1797
1798 argv += 5;
1799 argc -= 5;
1800
1801
1802 if (argc) {
1803 as.argc = argc;
1804 as.argv = argv;
1805
1806 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1807 if (ret)
1808 goto bad;
1809
1810 opt_string = dm_shift_arg(&as);
1811
1812 if (opt_params == 1 && opt_string &&
1813 !strcasecmp(opt_string, "allow_discards"))
1814 ti->num_discard_bios = 1;
1815 else if (opt_params) {
1816 ret = -EINVAL;
1817 ti->error = "Invalid feature arguments";
1818 goto bad;
1819 }
1820 }
1821
1822 ret = -ENOMEM;
1823 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
1824 if (!cc->io_queue) {
1825 ti->error = "Couldn't create kcryptd io queue";
1826 goto bad;
1827 }
1828
1829 cc->crypt_queue = alloc_workqueue("kcryptd",
1830 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1831 if (!cc->crypt_queue) {
1832 ti->error = "Couldn't create kcryptd queue";
1833 goto bad;
1834 }
1835
1836 ti->num_flush_bios = 1;
1837 ti->discard_zeroes_data_unsupported = true;
1838
1839 return 0;
1840
1841bad:
1842 crypt_dtr(ti);
1843 return ret;
1844}
1845
1846static int crypt_map(struct dm_target *ti, struct bio *bio)
1847{
1848 struct dm_crypt_io *io;
1849 struct crypt_config *cc = ti->private;
1850
1851
1852
1853
1854
1855
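	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO buffers need to be wiped
	 */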
1856 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1857 bio->bi_bdev = cc->dev->bdev;
1858 if (bio_sectors(bio))
1859 bio->bi_iter.bi_sector = cc->start +
1860 dm_target_offset(ti, bio->bi_iter.bi_sector);
1861 return DM_MAPIO_REMAPPED;
1862 }
1863
1864 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1865
1866 if (bio_data_dir(io->base_bio) == READ) {
1867 if (kcryptd_io_read(io, GFP_NOWAIT))
1868 kcryptd_queue_io(io);
1869 } else
1870 kcryptd_queue_crypt(io);
1871
1872 return DM_MAPIO_SUBMITTED;
1873}
1874
1875static void crypt_status(struct dm_target *ti, status_type_t type,
1876 unsigned status_flags, char *result, unsigned maxlen)
1877{
1878 struct crypt_config *cc = ti->private;
1879 unsigned i, sz = 0;
1880
1881 switch (type) {
1882 case STATUSTYPE_INFO:
1883 result[0] = '\0';
1884 break;
1885
1886 case STATUSTYPE_TABLE:
1887 DMEMIT("%s ", cc->cipher_string);
1888
1889 if (cc->key_size > 0)
1890 for (i = 0; i < cc->key_size; i++)
1891 DMEMIT("%02x", cc->key[i]);
1892 else
1893 DMEMIT("-");
1894
1895 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1896 cc->dev->name, (unsigned long long)cc->start);
1897
1898 if (ti->num_discard_bios)
1899 DMEMIT(" 1 allow_discards");
1900
1901 break;
1902 }
1903}
1904
1905static void crypt_postsuspend(struct dm_target *ti)
1906{
1907 struct crypt_config *cc = ti->private;
1908
1909 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1910}
1911
1912static int crypt_preresume(struct dm_target *ti)
1913{
1914 struct crypt_config *cc = ti->private;
1915
1916 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1917 DMERR("aborting resume - crypt key is not set.");
1918 return -EAGAIN;
1919 }
1920
1921 return 0;
1922}
1923
1924static void crypt_resume(struct dm_target *ti)
1925{
1926 struct crypt_config *cc = ti->private;
1927
1928 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1929}
1930
1931
1932
1933
1934
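/* Message interface
 *	key set <key>
 *	key wipe
 */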
1935static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
1936{
1937 struct crypt_config *cc = ti->private;
1938 int ret = -EINVAL;
1939
1940 if (argc < 2)
1941 goto error;
1942
1943 if (!strcasecmp(argv[0], "key")) {
1944 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
1945 DMWARN("not suspended during key manipulation.");
1946 return -EINVAL;
1947 }
1948 if (argc == 3 && !strcasecmp(argv[1], "set")) {
1949 ret = crypt_set_key(cc, argv[2]);
1950 if (ret)
1951 return ret;
1952 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
1953 ret = cc->iv_gen_ops->init(cc);
1954 return ret;
1955 }
1956 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
1957 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
1958 ret = cc->iv_gen_ops->wipe(cc);
1959 if (ret)
1960 return ret;
1961 }
1962 return crypt_wipe_key(cc);
1963 }
1964 }
1965
1966error:
1967 DMWARN("unrecognised message received.");
1968 return -EINVAL;
1969}
1970
1971static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
1972 struct bio_vec *biovec, int max_size)
1973{
1974 struct crypt_config *cc = ti->private;
1975 struct request_queue *q = bdev_get_queue(cc->dev->bdev);
1976
1977 if (!q->merge_bvec_fn)
1978 return max_size;
1979
1980 bvm->bi_bdev = cc->dev->bdev;
1981 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
1982
1983 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
1984}
1985
1986static int crypt_iterate_devices(struct dm_target *ti,
1987 iterate_devices_callout_fn fn, void *data)
1988{
1989 struct crypt_config *cc = ti->private;
1990
1991 return fn(ti, cc->dev, cc->start, ti->len, data);
1992}
1993
1994static struct target_type crypt_target = {
1995 .name = "crypt",
1996 .version = {1, 13, 0},
1997 .module = THIS_MODULE,
1998 .ctr = crypt_ctr,
1999 .dtr = crypt_dtr,
2000 .map = crypt_map,
2001 .status = crypt_status,
2002 .postsuspend = crypt_postsuspend,
2003 .preresume = crypt_preresume,
2004 .resume = crypt_resume,
2005 .message = crypt_message,
2006 .merge = crypt_merge,
2007 .iterate_devices = crypt_iterate_devices,
2008};
2009
2010static int __init dm_crypt_init(void)
2011{
2012 int r;
2013
2014 _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
2015 if (!_crypt_io_pool)
2016 return -ENOMEM;
2017
2018 r = dm_register_target(&crypt_target);
2019 if (r < 0) {
2020 DMERR("register failed %d", r);
2021 kmem_cache_destroy(_crypt_io_pool);
2022 }
2023
2024 return r;
2025}
2026
2027static void __exit dm_crypt_exit(void)
2028{
2029 dm_unregister_target(&crypt_target);
2030 kmem_cache_destroy(_crypt_io_pool);
2031}
2032
2033module_init(dm_crypt_init);
2034module_exit(dm_crypt_exit);
2035
2036MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
2037MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2038MODULE_LICENSE("GPL");
2039