#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

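/*
 * context holding the current state of a multi-part conversion
 */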
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	struct skcipher_request *req;
};

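/*
 * per bio private data
 */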
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_ahash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

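/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */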
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

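/*
 * The fields in here must be read only after initialization.
 */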
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	void *iv_private;
	struct crypto_skcipher **tfms;
	unsigned tfms_count;

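	/*
	 * Layout of each crypto request allocated from req_pool:
	 *
	 *   struct skcipher_request
	 *      transform context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding keeps dm_crypt_request and the IV correctly aligned;
	 * dmreq_start is the offset of dm_crypt_request within the request.
	 */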
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	unsigned int key_extra_size;
	u8 key[0];
};

#define MIN_IOS 64

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}

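/*
 * IV generation algorithms implemented below:
 *
 * plain: IV is the 32-bit little-endian sector number, zero padded.
 *
 * plain64: IV is the 64-bit little-endian sector number, zero padded.
 *
 * essiv: "encrypted sector|salt initial vector"; the sector number is
 *        encrypted with a cipher keyed with a hash of the volume key
 *        (the salt).
 *
 * benbi: 64-bit big-endian "narrow block" count, starting at 1
 *        (for LRW and similar narrow-block modes).
 *
 * null: IV is always zero (compatibility mode only).
 *
 * lmk:  IV scheme compatible with the Loop-AES block device encryption
 *       system (MD5-based block chaining).
 *
 * tcw:  IV and whitening scheme compatible with old TrueCrypt volumes
 *       (CRC32-based whitening applied to the sector data).
 */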
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

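/* Initialise ESSIV - compute the salt as hash(volume key) and key the ESSIV cipher with it */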
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	ahash_request_set_tfm(req, essiv->hash_tfm);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);

	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_ahash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_skcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_ahash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_ahash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_ahash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_ahash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64));

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

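/*
 * Whitening for the TCW mode: derive a per-sector 16-byte whitening value
 * from the whitening seed and the sector number (mixed via CRC32), then
 * XOR it repeatedly over the whole 512-byte sector data.
 */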
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	desc->tfm = tcw->crc32_tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	}

	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
	kunmap_atomic(dst);

	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr = crypt_iv_tcw_ctr,
	.dtr = crypt_iv_tcw_dtr,
	.init = crypt_iv_tcw_init,
	.wipe = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post = crypt_iv_tcw_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct skcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
					     struct dm_crypt_request *dmreq)
{
	return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct skcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				   1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->req)
		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);

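	/*
	 * Use CRYPTO_TFM_REQ_MAY_BACKLOG so that the crypto driver may queue
	 * the request internally when its queue is full (reported as -EBUSY
	 * and handled in crypt_convert()).
	 */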
	skcipher_request_set_callback(ctx->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}

static void crypt_free_req(struct crypt_config *cc,
			   struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, cc->req_pool);
}

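/*
 * Encrypt / decrypt data from one bio to another one (can be the same one).
 */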
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, ctx->req);

		switch (r) {
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
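			/*
			 * -EBUSY means the request was backlogged by the
			 * crypto driver; once it restarts, it completes
			 * asynchronously, so fall through.
			 */
			/* fall through */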
		case -EINPROGRESS:
			ctx->req = NULL;
			ctx->cc_sector++;
			continue;

		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

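/*
 * Generate a new unfragmented bio with the given size.
 *
 * Pages are first allocated with GFP_NOWAIT; if that fails, the function
 * retries with __GFP_DIRECT_RECLAIM while holding cc->bio_alloc_lock, so
 * that concurrent callers cannot deadlock by exhausting the page mempool
 * halfway through their allocations.
 */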
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.req = NULL;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.req)
		crypt_free_req(cc, io->ctx.req, base_bio);

	base_bio->bi_error = error;
	bio_endio(base_bio);
}

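/*
 * kcryptd_io / kcryptd:
 *
 * crypt_endio() runs in interrupt context, so the actual decryption of a
 * completed READ is deferred to the kcryptd workqueue; READ submission can
 * also be deferred to the kcryptd_io workqueue when a NOWAIT clone
 * allocation fails.  Keeping I/O submission and crypto work on separate
 * queues avoids blocking one stage on memory allocations of the other.
 */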
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	int error;

	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_error;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

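	/*
	 * The clone shares the data pages of the original bio: the read is
	 * remapped to the underlying device and the data is decrypted in
	 * place by kcryptd once the clone completes.
	 */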
	clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = -ENOMEM;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

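/*
 * Dedicated per-device write thread: encrypted write bios are queued on an
 * rb-tree sorted by sector and submitted from here, so that writes reach
 * the underlying device in roughly sequential order.
 */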
static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		DECLARE_WAITQUEUE(wait, current);

		spin_lock_irq(&cc->write_thread_wait.lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&cc->write_thread_wait, &wait);

		spin_unlock_irq(&cc->write_thread_wait.lock);

		if (unlikely(kthread_should_stop())) {
			set_task_state(current, TASK_RUNNING);
			remove_wait_queue(&cc->write_thread_wait, &wait);
			break;
		}

		schedule();

		set_task_state(current, TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_wait.lock);
		__remove_wait_queue(&cc->write_thread_wait, &wait);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_wait.lock);

		BUG_ON(rb_parent(write_tree.rb_node));

		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);

	wake_up_locked(&cc->write_thread_wait);
	spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	int r;

	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = -EIO;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = -EIO;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

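/*
 * Decode key from its hex representation
 */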
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_skcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_skcipher_setkey(cc->tfms[i],
					   cc->key + (i * subkey_size),
					   subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	if (cc->key_size != (key_string_len >> 1))
		goto out;

	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;
	cc->key_extra_size = 0;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;

		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2;
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

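/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
 *
 * e.g. (illustrative values only):
 *   aes-cbc-essiv:sha256 <hex key> 0 /dev/sdX 0 1 allow_discards
 */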
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 3, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	cc->dmreq_start = sizeof(struct skcipher_request);
	cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& crypto_skcipher_alignmask(any_tfm(cc));
	} else {
		iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
	}

	ret = -ENOMEM;
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
		sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		ret = -EINVAL;
		while (opt_params--) {
			opt_string = dm_shift_arg(&as);
			if (!opt_string) {
				ti->error = "Not enough feature arguments";
				goto bad;
			}

			if (!strcasecmp(opt_string, "allow_discards"))
				ti->num_discard_bios = 1;

			else if (!strcasecmp(opt_string, "same_cpu_crypt"))
				set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

			else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
				set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);

			else {
				ti->error = "Invalid feature arguments";
				goto bad;
			}
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

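	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */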
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    bio_data_dir(bio) == WRITE)
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
	io->ctx.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
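	/*
	 * Constrain max_segment_size to PAGE_SIZE: crypt_alloc_buffer() may
	 * allocate encryption pages that are not physically contiguous the
	 * way the original bio's pages were, so larger segments could exceed
	 * the underlying device's segment limits after re-mapping.
	 */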
	limits->max_segment_size = PAGE_SIZE;
}

static struct target_type crypt_target = {
	.name = "crypt",
	.version = {1, 14, 1},
	.module = THIS_MODULE,
	.ctr = crypt_ctr,
	.dtr = crypt_dtr,
	.map = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");