/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
 *
 * This file is released under the GPL.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t cc_sector;
	atomic_t cc_pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};
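
/*
 * Rough call order for an IV generator (editor's summary, not normative):
 * as wired up in crypt_ctr_cipher() below,
 *
 *   ctr(cc, ti, ivopts)    - parse options, allocate private state
 *   init(cc)               - derive key-dependent state (e.g. ESSIV salt)
 *   generator(cc, iv, ..)  - produce the IV for each 512B sector
 *   post(cc, iv, ..)       - optional fixup after en/decryption (lmk, tcw)
 *   wipe(cc)               - clear key-derived state on "key wipe"
 *   dtr(cc)                - free private state
 */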

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
};

/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption request/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per-CPU state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	u8 key[0];
};
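
/*
 * Illustrative key layout (editor's example, not from the original source):
 * the hex key from the table line is decoded into the flexible key[] array
 * at the end of this struct. For a multi-key spec such as
 * "aes:4-cbc-essiv:sha256" with a 128-byte decoded key and
 * key_extra_size == 0, tfms_count is 4 and each tfm receives a 32-byte
 * subkey; IV modes such as lmk/tcw additionally reserve key_extra_size
 * trailing bytes of key[] for their seed material.
 */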

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally extra IV seed.
 *       This means that after decryption the first block
 *       of sector must be tweaked according to decrypted data.
 *       Loop-AES can use three encryption schemes:
 *         version 1: is plain aes-cbc mode
 *         version 2: uses 64 multikey scheme with lmk IV generator
 *         version 3: the same as version 2 with additional IV seed
 *                    (it uses 65 keys, last key is used as IV seed)
 *
 * tcw:  Compatible implementation of the block chaining mode used
 *       by the TrueCrypt device encryption system (prior to version 4.1).
 *       For more info see: http://www.truecrypt.org
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from initial key and the sector number.
 *       In addition, whitening value is applied on every sector, whitening
 *       is calculated from initial key, sector number and mixed using CRC32.
 *       Note that this encryption scheme is vulnerable to watermarking attacks
 *       and should be used for old compatible containers access only.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
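
/*
 * Worked example (editor's illustration): for sector number 0x1122334455,
 * "plain" keeps only the low 32 bits, so iv[0..3] = 55 44 33 22 and the
 * rest of the IV is zero, while "plain64" stores the full 64-bit value
 * little-endian: iv[0..7] = 55 44 33 22 11 00 00 00.
 */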

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}
352
353static void crypt_iv_essiv_dtr(struct crypt_config *cc)
354{
355 struct crypto_cipher *essiv_tfm;
356 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
357
358 crypto_free_hash(essiv->hash_tfm);
359 essiv->hash_tfm = NULL;
360
361 kzfree(essiv->salt);
362 essiv->salt = NULL;
363
364 essiv_tfm = cc->iv_private;
365
366 if (essiv_tfm)
367 crypto_free_cipher(essiv_tfm);
368
369 cc->iv_private = NULL;
370}
371
372static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
373 const char *opts)
374{
375 struct crypto_cipher *essiv_tfm = NULL;
376 struct crypto_hash *hash_tfm = NULL;
377 u8 *salt = NULL;
378 int err;
379
380 if (!opts) {
381 ti->error = "Digest algorithm missing for ESSIV mode";
382 return -EINVAL;
383 }
384
385
386 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
387 if (IS_ERR(hash_tfm)) {
388 ti->error = "Error initializing ESSIV hash";
389 err = PTR_ERR(hash_tfm);
390 goto bad;
391 }
392
393 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
394 if (!salt) {
395 ti->error = "Error kmallocing salt storage in ESSIV";
396 err = -ENOMEM;
397 goto bad;
398 }
399
400 cc->iv_gen_private.essiv.salt = salt;
401 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
402
403 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
404 crypto_hash_digestsize(hash_tfm));
405 if (IS_ERR(essiv_tfm)) {
406 crypt_iv_essiv_dtr(cc);
407 return PTR_ERR(essiv_tfm);
408 }
409 cc->iv_private = essiv_tfm;
410
411 return 0;
412
413bad:
414 if (hash_tfm && !IS_ERR(hash_tfm))
415 crypto_free_hash(hash_tfm);
416 kfree(salt);
417 return err;
418}
419
420static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
421 struct dm_crypt_request *dmreq)
422{
423 struct crypto_cipher *essiv_tfm = cc->iv_private;
424
425 memset(iv, 0, cc->iv_size);
426 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
427 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
428
429 return 0;
430}
431
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64));

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}
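
/*
 * Worked example (editor's illustration): with a 16-byte cipher block
 * size, log = 4 and shift = 9 - 4 = 5, i.e. 32 cipher blocks per 512B
 * sector. For sector 3 the benbi IV is the big-endian 64-bit value
 * (3 << 5) + 1 = 97, stored in the last 8 bytes of the IV buffer.
 */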

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(tcw->crc32_tfm)];
	} sdesc;
	int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
	sdesc.desc.tfm = tcw->crc32_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(&sdesc.desc);
		if (r)
			goto out;
		r = crypto_shash_update(&sdesc.desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(&sdesc.desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memset(buf, 0, sizeof(buf));
	return r;
}
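
/*
 * Summary (editor's note): the 16-byte whitening seed is XORed with the
 * sector number in both halves, each 32-bit word is replaced by its own
 * CRC32, the 16 bytes are folded down to 8 (buf[0..3] ^= buf[12..15],
 * buf[4..7] ^= buf[8..11]), and those 8 bytes are XORed over every
 * 8-byte chunk of the 512-byte sector.
 */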
736
737static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
738 struct dm_crypt_request *dmreq)
739{
740 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
741 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
742 u8 *src;
743 int r = 0;
744
745
746 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
747 src = kmap_atomic(sg_page(&dmreq->sg_in));
748 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
749 kunmap_atomic(src);
750 }
751
752
753 memcpy(iv, tcw->iv_seed, cc->iv_size);
754 crypto_xor(iv, (u8 *)§or, 8);
755 if (cc->iv_size > 8)
756 crypto_xor(&iv[8], (u8 *)§or, cc->iv_size - 8);
757
758 return r;
759}
760
761static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
762 struct dm_crypt_request *dmreq)
763{
764 u8 *dst;
765 int r;
766
767 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
768 return 0;
769
770
771 dst = kmap_atomic(sg_page(&dmreq->sg_out));
772 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
773 kunmap_atomic(dst);
774
775 return r;
776}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr	   = crypt_iv_lmk_ctr,
	.dtr	   = crypt_iv_lmk_dtr,
	.init	   = crypt_iv_lmk_init,
	.wipe	   = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post	   = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr	   = crypt_iv_tcw_ctr,
	.dtr	   = crypt_iv_tcw_dtr,
	.init	   = crypt_iv_tcw_init,
	.wipe	   = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post	   = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}
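
/*
 * Summary (editor's note): cc_pending starts at 1 and is incremented once
 * per submitted block, so the conversion only counts as finished when the
 * caller's final atomic_dec_and_test() runs. For a bio processed entirely
 * synchronously the counter peaks at 2 and returns to 1 inside the loop;
 * for async blocks the matching decrement happens in kcryptd_async_done().
 */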

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
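
/*
 * Worked example (editor's illustration): crypt_decode_key(key, "ab0f", 2)
 * yields key[0] = 0xab and key[1] = 0x0f; a non-hex character makes
 * kstrtou8() fail and the function returns -EINVAL.
 */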

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}
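
/*
 * Worked example (editor's illustration): for "aes:4-cbc-essiv:sha256"
 * with a 128-byte volume key and key_extra_size == 0, subkey_size is
 * (128 - 0) >> ilog2(4) = 32, so tfm i is keyed with bytes
 * [32*i, 32*i + 31] of cc->key.
 */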

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
		}

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
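
/*
 * Worked example (editor's illustration): the table cipher string
 * "aes:2-cbc-essiv:sha256" parses as cipher "aes", keycount 2,
 * chainmode "cbc", ivmode "essiv" and ivopts "sha256", and is handed
 * to the crypto API as "cbc(aes)".
 */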

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
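
/*
 * Usage sketch (editor's illustration, not from this file): a matching
 * table line has the form
 *
 *   <start> <len> crypt <cipher> <key> <iv_offset> <device> <offset> \
 *           [<#opt_params> <opt_params>]
 *
 * e.g. "0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0
 * /dev/sdb 0 1 allow_discards" loaded via dmsetup.
 */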

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO buffers are not aligned
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 13, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");
2051