1
2
3
4
5
6
7
8
9
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/bio.h>
16#include <linux/blkdev.h>
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/crypto.h>
20#include <linux/workqueue.h>
21#include <linux/kthread.h>
22#include <linux/backing-dev.h>
23#include <linux/atomic.h>
24#include <linux/scatterlist.h>
25#include <linux/rbtree.h>
26#include <asm/page.h>
27#include <asm/unaligned.h>
28#include <crypto/hash.h>
29#include <crypto/md5.h>
30#include <crypto/algapi.h>
31
32#include <linux/device-mapper.h>
33
34#define DM_MSG_PREFIX "crypt"
35
36
37
38
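/*
 * context holding the current state of a multi-part conversion
 */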
39struct convert_context {
40 struct completion restart;
41 struct bio *bio_in;
42 struct bio *bio_out;
43 struct bvec_iter iter_in;
44 struct bvec_iter iter_out;
45 sector_t cc_sector;
46 atomic_t cc_pending;
47 struct ablkcipher_request *req;
48};
49
50
51
52
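/*
 * per bio private data
 */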
53struct dm_crypt_io {
54 struct crypt_config *cc;
55 struct bio *base_bio;
56 struct work_struct work;
57
58 struct convert_context ctx;
59
60 atomic_t io_pending;
61 int error;
62 sector_t sector;
63
64 struct rb_node rb_node;
65} CRYPTO_MINALIGN_ATTR;
66
67struct dm_crypt_request {
68 struct convert_context *ctx;
69 struct scatterlist sg_in;
70 struct scatterlist sg_out;
71 sector_t iv_sector;
72};
73
74struct crypt_config;
75
76struct crypt_iv_operations {
77 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
78 const char *opts);
79 void (*dtr)(struct crypt_config *cc);
80 int (*init)(struct crypt_config *cc);
81 int (*wipe)(struct crypt_config *cc);
82 int (*generator)(struct crypt_config *cc, u8 *iv,
83 struct dm_crypt_request *dmreq);
84 int (*post)(struct crypt_config *cc, u8 *iv,
85 struct dm_crypt_request *dmreq);
86};
87
88struct iv_essiv_private {
89 struct crypto_hash *hash_tfm;
90 u8 *salt;
91};
92
93struct iv_benbi_private {
94 int shift;
95};
96
97#define LMK_SEED_SIZE 64
98struct iv_lmk_private {
99 struct crypto_shash *hash_tfm;
100 u8 *seed;
101};
102
103#define TCW_WHITENING_SIZE 16
104struct iv_tcw_private {
105 struct crypto_shash *crc32_tfm;
106 u8 *iv_seed;
107 u8 *whitening;
108};
109
110
111
112
113
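/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */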
114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
116
117
118
119
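/*
 * The fields in here must be read only after initialization.
 */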
120struct crypt_config {
121 struct dm_dev *dev;
122 sector_t start;
123
124
125
126
127
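	/*
	 * Pools and bio set used to allocate per-request data and
	 * encryption buffer pages.
	 */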
128 mempool_t *req_pool;
129 mempool_t *page_pool;
130 struct bio_set *bs;
131 struct mutex bio_alloc_lock;
132
133 struct workqueue_struct *io_queue;
134 struct workqueue_struct *crypt_queue;
135
136 struct task_struct *write_thread;
137 wait_queue_head_t write_thread_wait;
138 struct rb_root write_tree;
139
140 char *cipher;
141 char *cipher_string;
142
143 struct crypt_iv_operations *iv_gen_ops;
144 union {
145 struct iv_essiv_private essiv;
146 struct iv_benbi_private benbi;
147 struct iv_lmk_private lmk;
148 struct iv_tcw_private tcw;
149 } iv_gen_private;
150 sector_t iv_offset;
151 unsigned int iv_size;
152
153
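	/* ESSIV: struct crypto_cipher *essiv_tfm */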
154 void *iv_private;
155 struct crypto_ablkcipher **tfms;
156 unsigned tfms_count;
157
158
159
160
161
162
163
164
165
166
167
168
169
170
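	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */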
171 unsigned int dmreq_start;
172
173 unsigned int per_bio_data_size;
174
175 unsigned long flags;
176 unsigned int key_size;
177 unsigned int key_parts;
178 unsigned int key_extra_size;
179 u8 key[0];
180};
181
182#define MIN_IOS 16
183
184static void clone_init(struct dm_crypt_io *, struct bio *);
185static void kcryptd_queue_crypt(struct dm_crypt_io *io);
186static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
187
188
189
190
191static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
192{
193 return cc->tfms[0];
194}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
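/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop devices. Do not use for new devices.
 *
 * lmk: compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system.
 *
 * tcw: compatible implementation of the block chaining and whitening mode
 *      used by older TrueCrypt devices.
 */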
243static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
244 struct dm_crypt_request *dmreq)
245{
246 memset(iv, 0, cc->iv_size);
247 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
248
249 return 0;
250}
251
252static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
253 struct dm_crypt_request *dmreq)
254{
255 memset(iv, 0, cc->iv_size);
256 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
257
258 return 0;
259}
260
261
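/* Initialise ESSIV - compute salt but no local memory allocations */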
262static int crypt_iv_essiv_init(struct crypt_config *cc)
263{
264 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
265 struct hash_desc desc;
266 struct scatterlist sg;
267 struct crypto_cipher *essiv_tfm;
268 int err;
269
270 sg_init_one(&sg, cc->key, cc->key_size);
271 desc.tfm = essiv->hash_tfm;
272 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
273
274 err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
275 if (err)
276 return err;
277
278 essiv_tfm = cc->iv_private;
279
280 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
281 crypto_hash_digestsize(essiv->hash_tfm));
282 if (err)
283 return err;
284
285 return 0;
286}
287
288
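/* Wipe salt and reset key derived from volume key */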
289static int crypt_iv_essiv_wipe(struct crypt_config *cc)
290{
291 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
292 unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
293 struct crypto_cipher *essiv_tfm;
294 int r, err = 0;
295
296 memset(essiv->salt, 0, salt_size);
297
298 essiv_tfm = cc->iv_private;
299 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
300 if (r)
301 err = r;
302
303 return err;
304}
305
306
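/* Set up the block cipher used by ESSIV and key it with the hashed salt */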
307static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
308 struct dm_target *ti,
309 u8 *salt, unsigned saltsize)
310{
311 struct crypto_cipher *essiv_tfm;
312 int err;
313
314
315 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
316 if (IS_ERR(essiv_tfm)) {
317 ti->error = "Error allocating crypto tfm for ESSIV";
318 return essiv_tfm;
319 }
320
321 if (crypto_cipher_blocksize(essiv_tfm) !=
322 crypto_ablkcipher_ivsize(any_tfm(cc))) {
323 ti->error = "Block size of ESSIV cipher does "
324 "not match IV size of block cipher";
325 crypto_free_cipher(essiv_tfm);
326 return ERR_PTR(-EINVAL);
327 }
328
329 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
330 if (err) {
331 ti->error = "Failed to set key for ESSIV cipher";
332 crypto_free_cipher(essiv_tfm);
333 return ERR_PTR(err);
334 }
335
336 return essiv_tfm;
337}
338
339static void crypt_iv_essiv_dtr(struct crypt_config *cc)
340{
341 struct crypto_cipher *essiv_tfm;
342 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
343
344 crypto_free_hash(essiv->hash_tfm);
345 essiv->hash_tfm = NULL;
346
347 kzfree(essiv->salt);
348 essiv->salt = NULL;
349
350 essiv_tfm = cc->iv_private;
351
352 if (essiv_tfm)
353 crypto_free_cipher(essiv_tfm);
354
355 cc->iv_private = NULL;
356}
357
358static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
359 const char *opts)
360{
361 struct crypto_cipher *essiv_tfm = NULL;
362 struct crypto_hash *hash_tfm = NULL;
363 u8 *salt = NULL;
364 int err;
365
366 if (!opts) {
367 ti->error = "Digest algorithm missing for ESSIV mode";
368 return -EINVAL;
369 }
370
371
372 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
373 if (IS_ERR(hash_tfm)) {
374 ti->error = "Error initializing ESSIV hash";
375 err = PTR_ERR(hash_tfm);
376 goto bad;
377 }
378
379 salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
380 if (!salt) {
381 ti->error = "Error kmallocing salt storage in ESSIV";
382 err = -ENOMEM;
383 goto bad;
384 }
385
386 cc->iv_gen_private.essiv.salt = salt;
387 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
388
389 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
390 crypto_hash_digestsize(hash_tfm));
391 if (IS_ERR(essiv_tfm)) {
392 crypt_iv_essiv_dtr(cc);
393 return PTR_ERR(essiv_tfm);
394 }
395 cc->iv_private = essiv_tfm;
396
397 return 0;
398
399bad:
400 if (hash_tfm && !IS_ERR(hash_tfm))
401 crypto_free_hash(hash_tfm);
402 kfree(salt);
403 return err;
404}
405
406static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
407 struct dm_crypt_request *dmreq)
408{
409 struct crypto_cipher *essiv_tfm = cc->iv_private;
410
411 memset(iv, 0, cc->iv_size);
412 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
413 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
414
415 return 0;
416}
417
418static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
419 const char *opts)
420{
421 unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
422 int log = ilog2(bs);
423
424
425
426
427 if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
429 return -EINVAL;
430 }
431
432 if (log > 9) {
		ti->error = "cipher blocksize is > 512";
434 return -EINVAL;
435 }
436
437 cc->iv_gen_private.benbi.shift = 9 - log;
438
439 return 0;
440}
441
442static void crypt_iv_benbi_dtr(struct crypt_config *cc)
443{
444}
445
446static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
447 struct dm_crypt_request *dmreq)
448{
449 __be64 val;
450
451 memset(iv, 0, cc->iv_size - sizeof(u64));
452
453 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
454 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
455
456 return 0;
457}
458
459static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
460 struct dm_crypt_request *dmreq)
461{
462 memset(iv, 0, cc->iv_size);
463
464 return 0;
465}
466
467static void crypt_iv_lmk_dtr(struct crypt_config *cc)
468{
469 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
470
471 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
472 crypto_free_shash(lmk->hash_tfm);
473 lmk->hash_tfm = NULL;
474
475 kzfree(lmk->seed);
476 lmk->seed = NULL;
477}
478
479static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
480 const char *opts)
481{
482 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
483
484 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
485 if (IS_ERR(lmk->hash_tfm)) {
486 ti->error = "Error initializing LMK hash";
487 return PTR_ERR(lmk->hash_tfm);
488 }
489
490
491 if (cc->key_parts == cc->tfms_count) {
492 lmk->seed = NULL;
493 return 0;
494 }
495
496 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
497 if (!lmk->seed) {
498 crypt_iv_lmk_dtr(cc);
499 ti->error = "Error kmallocing seed storage in LMK";
500 return -ENOMEM;
501 }
502
503 return 0;
504}
505
506static int crypt_iv_lmk_init(struct crypt_config *cc)
507{
508 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
509 int subkey_size = cc->key_size / cc->key_parts;
510
511
512 if (lmk->seed)
513 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
514 crypto_shash_digestsize(lmk->hash_tfm));
515
516 return 0;
517}
518
519static int crypt_iv_lmk_wipe(struct crypt_config *cc)
520{
521 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
522
523 if (lmk->seed)
524 memset(lmk->seed, 0, LMK_SEED_SIZE);
525
526 return 0;
527}
528
529static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
530 struct dm_crypt_request *dmreq,
531 u8 *data)
532{
533 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
534 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
535 struct md5_state md5state;
536 __le32 buf[4];
537 int i, r;
538
539 desc->tfm = lmk->hash_tfm;
540 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
541
542 r = crypto_shash_init(desc);
543 if (r)
544 return r;
545
546 if (lmk->seed) {
547 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
548 if (r)
549 return r;
550 }
551
552
553 r = crypto_shash_update(desc, data + 16, 16 * 31);
554 if (r)
555 return r;
556
557
558 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
559 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
560 buf[2] = cpu_to_le32(4024);
561 buf[3] = 0;
562 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
563 if (r)
564 return r;
565
566
567 r = crypto_shash_export(desc, &md5state);
568 if (r)
569 return r;
570
571 for (i = 0; i < MD5_HASH_WORDS; i++)
572 __cpu_to_le32s(&md5state.hash[i]);
573 memcpy(iv, &md5state.hash, cc->iv_size);
574
575 return 0;
576}
577
578static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
579 struct dm_crypt_request *dmreq)
580{
581 u8 *src;
582 int r = 0;
583
584 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
585 src = kmap_atomic(sg_page(&dmreq->sg_in));
586 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
587 kunmap_atomic(src);
588 } else
589 memset(iv, 0, cc->iv_size);
590
591 return r;
592}
593
594static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
595 struct dm_crypt_request *dmreq)
596{
597 u8 *dst;
598 int r;
599
600 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
601 return 0;
602
603 dst = kmap_atomic(sg_page(&dmreq->sg_out));
604 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
605
606
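	/* Tweak the first block of plaintext sector */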
607 if (!r)
608 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
609
610 kunmap_atomic(dst);
611 return r;
612}
613
614static void crypt_iv_tcw_dtr(struct crypt_config *cc)
615{
616 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
617
618 kzfree(tcw->iv_seed);
619 tcw->iv_seed = NULL;
620 kzfree(tcw->whitening);
621 tcw->whitening = NULL;
622
623 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
624 crypto_free_shash(tcw->crc32_tfm);
625 tcw->crc32_tfm = NULL;
626}
627
628static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
629 const char *opts)
630{
631 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
632
633 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
634 ti->error = "Wrong key size for TCW";
635 return -EINVAL;
636 }
637
638 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
639 if (IS_ERR(tcw->crc32_tfm)) {
640 ti->error = "Error initializing CRC32 in TCW";
641 return PTR_ERR(tcw->crc32_tfm);
642 }
643
644 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
645 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
646 if (!tcw->iv_seed || !tcw->whitening) {
647 crypt_iv_tcw_dtr(cc);
648 ti->error = "Error allocating seed storage in TCW";
649 return -ENOMEM;
650 }
651
652 return 0;
653}
654
655static int crypt_iv_tcw_init(struct crypt_config *cc)
656{
657 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
658 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
659
660 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
661 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
662 TCW_WHITENING_SIZE);
663
664 return 0;
665}
666
667static int crypt_iv_tcw_wipe(struct crypt_config *cc)
668{
669 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
670
671 memset(tcw->iv_seed, 0, cc->iv_size);
672 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
673
674 return 0;
675}
676
677static int crypt_iv_tcw_whitening(struct crypt_config *cc,
678 struct dm_crypt_request *dmreq,
679 u8 *data)
680{
681 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
682 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
683 u8 buf[TCW_WHITENING_SIZE];
684 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
685 int i, r;
686
687
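	/* xor whitening with sector number */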
688 memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);
691
692
693 desc->tfm = tcw->crc32_tfm;
694 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
695 for (i = 0; i < 4; i++) {
696 r = crypto_shash_init(desc);
697 if (r)
698 goto out;
699 r = crypto_shash_update(desc, &buf[i * 4], 4);
700 if (r)
701 goto out;
702 r = crypto_shash_final(desc, &buf[i * 4]);
703 if (r)
704 goto out;
705 }
706 crypto_xor(&buf[0], &buf[12], 4);
707 crypto_xor(&buf[4], &buf[8], 4);
708
709
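	/* apply whitening (8 bytes) to whole sector */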
710 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
711 crypto_xor(data + i * 8, buf, 8);
712out:
713 memzero_explicit(buf, sizeof(buf));
714 return r;
715}
716
717static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
718 struct dm_crypt_request *dmreq)
719{
720 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
721 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
722 u8 *src;
723 int r = 0;
724
725
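	/* Remove whitening from ciphertext */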
726 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
727 src = kmap_atomic(sg_page(&dmreq->sg_in));
728 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
729 kunmap_atomic(src);
730 }
731
732
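	/* Calculate IV */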
733 memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
735 if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
737
738 return r;
739}
740
741static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
742 struct dm_crypt_request *dmreq)
743{
744 u8 *dst;
745 int r;
746
747 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
748 return 0;
749
750
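	/* Apply whitening on ciphertext */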
751 dst = kmap_atomic(sg_page(&dmreq->sg_out));
752 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
753 kunmap_atomic(dst);
754
755 return r;
756}
757
758static struct crypt_iv_operations crypt_iv_plain_ops = {
759 .generator = crypt_iv_plain_gen
760};
761
762static struct crypt_iv_operations crypt_iv_plain64_ops = {
763 .generator = crypt_iv_plain64_gen
764};
765
766static struct crypt_iv_operations crypt_iv_essiv_ops = {
767 .ctr = crypt_iv_essiv_ctr,
768 .dtr = crypt_iv_essiv_dtr,
769 .init = crypt_iv_essiv_init,
770 .wipe = crypt_iv_essiv_wipe,
771 .generator = crypt_iv_essiv_gen
772};
773
774static struct crypt_iv_operations crypt_iv_benbi_ops = {
775 .ctr = crypt_iv_benbi_ctr,
776 .dtr = crypt_iv_benbi_dtr,
777 .generator = crypt_iv_benbi_gen
778};
779
780static struct crypt_iv_operations crypt_iv_null_ops = {
781 .generator = crypt_iv_null_gen
782};
783
784static struct crypt_iv_operations crypt_iv_lmk_ops = {
785 .ctr = crypt_iv_lmk_ctr,
786 .dtr = crypt_iv_lmk_dtr,
787 .init = crypt_iv_lmk_init,
788 .wipe = crypt_iv_lmk_wipe,
789 .generator = crypt_iv_lmk_gen,
790 .post = crypt_iv_lmk_post
791};
792
793static struct crypt_iv_operations crypt_iv_tcw_ops = {
794 .ctr = crypt_iv_tcw_ctr,
795 .dtr = crypt_iv_tcw_dtr,
796 .init = crypt_iv_tcw_init,
797 .wipe = crypt_iv_tcw_wipe,
798 .generator = crypt_iv_tcw_gen,
799 .post = crypt_iv_tcw_post
800};
801
802static void crypt_convert_init(struct crypt_config *cc,
803 struct convert_context *ctx,
804 struct bio *bio_out, struct bio *bio_in,
805 sector_t sector)
806{
807 ctx->bio_in = bio_in;
808 ctx->bio_out = bio_out;
809 if (bio_in)
810 ctx->iter_in = bio_in->bi_iter;
811 if (bio_out)
812 ctx->iter_out = bio_out->bi_iter;
813 ctx->cc_sector = sector + cc->iv_offset;
814 init_completion(&ctx->restart);
815}
816
817static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
818 struct ablkcipher_request *req)
819{
820 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
821}
822
823static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
824 struct dm_crypt_request *dmreq)
825{
826 return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
827}
828
829static u8 *iv_of_dmreq(struct crypt_config *cc,
830 struct dm_crypt_request *dmreq)
831{
832 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
833 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
834}
835
836static int crypt_convert_block(struct crypt_config *cc,
837 struct convert_context *ctx,
838 struct ablkcipher_request *req)
839{
840 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
841 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
842 struct dm_crypt_request *dmreq;
843 u8 *iv;
844 int r;
845
846 dmreq = dmreq_of_req(cc, req);
847 iv = iv_of_dmreq(cc, dmreq);
848
849 dmreq->iv_sector = ctx->cc_sector;
850 dmreq->ctx = ctx;
851 sg_init_table(&dmreq->sg_in, 1);
852 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
853 bv_in.bv_offset);
854
855 sg_init_table(&dmreq->sg_out, 1);
856 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
857 bv_out.bv_offset);
858
859 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
860 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
861
862 if (cc->iv_gen_ops) {
863 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
864 if (r < 0)
865 return r;
866 }
867
868 ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
869 1 << SECTOR_SHIFT, iv);
870
871 if (bio_data_dir(ctx->bio_in) == WRITE)
872 r = crypto_ablkcipher_encrypt(req);
873 else
874 r = crypto_ablkcipher_decrypt(req);
875
876 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
877 r = cc->iv_gen_ops->post(cc, iv, dmreq);
878
879 return r;
880}
881
882static void kcryptd_async_done(struct crypto_async_request *async_req,
883 int error);
884
885static void crypt_alloc_req(struct crypt_config *cc,
886 struct convert_context *ctx)
887{
888 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
889
890 if (!ctx->req)
891 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
892
893 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
894
895
896
897
898
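	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */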
899 ablkcipher_request_set_callback(ctx->req,
900 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
901 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
902}
903
904static void crypt_free_req(struct crypt_config *cc,
905 struct ablkcipher_request *req, struct bio *base_bio)
906{
907 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
908
909 if ((struct ablkcipher_request *)(io + 1) != req)
910 mempool_free(req, cc->req_pool);
911}
912
913
914
915
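/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */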
916static int crypt_convert(struct crypt_config *cc,
917 struct convert_context *ctx)
918{
919 int r;
920
921 atomic_set(&ctx->cc_pending, 1);
922
923 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
924
925 crypt_alloc_req(cc, ctx);
926
927 atomic_inc(&ctx->cc_pending);
928
929 r = crypt_convert_block(cc, ctx, ctx->req);
930
931 switch (r) {
932
933
934
935
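		/*
		 * The request was queued by the crypto driver backlog
		 * because its queue is full; wait until the driver signals
		 * (via kcryptd_async_done) that processing has started,
		 * then handle it like -EINPROGRESS.
		 */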
936 case -EBUSY:
937 wait_for_completion(&ctx->restart);
938 reinit_completion(&ctx->restart);
939
940
941
942
943
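			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */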
944 case -EINPROGRESS:
945 ctx->req = NULL;
946 ctx->cc_sector++;
947 continue;
948
949
950
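		/* The request was already processed (synchronously). */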
951 case 0:
952 atomic_dec(&ctx->cc_pending);
953 ctx->cc_sector++;
954 cond_resched();
955 continue;
956
957
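		/* There was an error while processing the request. */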
958 default:
959 atomic_dec(&ctx->cc_pending);
960 return r;
961 }
962 }
963
964 return 0;
965}
966
967static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
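/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE in crypt_io_hints).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes each wanting 256 pages allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 */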
986static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
987{
988 struct crypt_config *cc = io->cc;
989 struct bio *clone;
990 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
991 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
992 unsigned i, len, remaining_size;
993 struct page *page;
994 struct bio_vec *bvec;
995
996retry:
997 if (unlikely(gfp_mask & __GFP_WAIT))
998 mutex_lock(&cc->bio_alloc_lock);
999
1000 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
1001 if (!clone)
1002 goto return_clone;
1003
1004 clone_init(io, clone);
1005
1006 remaining_size = size;
1007
1008 for (i = 0; i < nr_iovecs; i++) {
1009 page = mempool_alloc(cc->page_pool, gfp_mask);
1010 if (!page) {
1011 crypt_free_buffer_pages(cc, clone);
1012 bio_put(clone);
1013 gfp_mask |= __GFP_WAIT;
1014 goto retry;
1015 }
1016
1017 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1018
1019 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
1020 bvec->bv_page = page;
1021 bvec->bv_len = len;
1022 bvec->bv_offset = 0;
1023
1024 clone->bi_iter.bi_size += len;
1025
1026 remaining_size -= len;
1027 }
1028
1029return_clone:
1030 if (unlikely(gfp_mask & __GFP_WAIT))
1031 mutex_unlock(&cc->bio_alloc_lock);
1032
1033 return clone;
1034}
1035
1036static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1037{
1038 unsigned int i;
1039 struct bio_vec *bv;
1040
1041 bio_for_each_segment_all(bv, clone, i) {
1042 BUG_ON(!bv->bv_page);
1043 mempool_free(bv->bv_page, cc->page_pool);
1044 bv->bv_page = NULL;
1045 }
1046}
1047
1048static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1049 struct bio *bio, sector_t sector)
1050{
1051 io->cc = cc;
1052 io->base_bio = bio;
1053 io->sector = sector;
1054 io->error = 0;
1055 io->ctx.req = NULL;
1056 atomic_set(&io->io_pending, 0);
1057}
1058
1059static void crypt_inc_pending(struct dm_crypt_io *io)
1060{
1061 atomic_inc(&io->io_pending);
1062}
1063
1064
1065
1066
1067
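/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */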
1068static void crypt_dec_pending(struct dm_crypt_io *io)
1069{
1070 struct crypt_config *cc = io->cc;
1071 struct bio *base_bio = io->base_bio;
1072 int error = io->error;
1073
1074 if (!atomic_dec_and_test(&io->io_pending))
1075 return;
1076
1077 if (io->ctx.req)
1078 crypt_free_req(cc, io->ctx.req, base_bio);
1079
1080 base_bio->bi_error = error;
1081 bio_endio(base_bio);
1082}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
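/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */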
1101static void crypt_endio(struct bio *clone)
1102{
1103 struct dm_crypt_io *io = clone->bi_private;
1104 struct crypt_config *cc = io->cc;
1105 unsigned rw = bio_data_dir(clone);
1106 int error;
1107
1108
1109
1110
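	/*
	 * free the processed pages
	 */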
1111 if (rw == WRITE)
1112 crypt_free_buffer_pages(cc, clone);
1113
1114 error = clone->bi_error;
1115 bio_put(clone);
1116
1117 if (rw == READ && !error) {
1118 kcryptd_queue_crypt(io);
1119 return;
1120 }
1121
1122 if (unlikely(error))
1123 io->error = error;
1124
1125 crypt_dec_pending(io);
1126}
1127
1128static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1129{
1130 struct crypt_config *cc = io->cc;
1131
1132 clone->bi_private = io;
1133 clone->bi_end_io = crypt_endio;
1134 clone->bi_bdev = cc->dev->bdev;
1135 clone->bi_rw = io->base_bio->bi_rw;
1136}
1137
1138static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1139{
1140 struct crypt_config *cc = io->cc;
1141 struct bio *clone;
1142
1143
1144
1145
1146
1147
1148
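	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast!
	 */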
1149 clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
1150 if (!clone)
1151 return 1;
1152
1153 crypt_inc_pending(io);
1154
1155 clone_init(io, clone);
1156 clone->bi_iter.bi_sector = cc->start + io->sector;
1157
1158 generic_make_request(clone);
1159 return 0;
1160}
1161
1162static void kcryptd_io_read_work(struct work_struct *work)
1163{
1164 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1165
1166 crypt_inc_pending(io);
1167 if (kcryptd_io_read(io, GFP_NOIO))
1168 io->error = -ENOMEM;
1169 crypt_dec_pending(io);
1170}
1171
1172static void kcryptd_queue_read(struct dm_crypt_io *io)
1173{
1174 struct crypt_config *cc = io->cc;
1175
1176 INIT_WORK(&io->work, kcryptd_io_read_work);
1177 queue_work(cc->io_queue, &io->work);
1178}
1179
1180static void kcryptd_io_write(struct dm_crypt_io *io)
1181{
1182 struct bio *clone = io->ctx.bio_out;
1183
1184 generic_make_request(clone);
1185}
1186
1187#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1188
1189static int dmcrypt_write(void *data)
1190{
1191 struct crypt_config *cc = data;
1192 struct dm_crypt_io *io;
1193
1194 while (1) {
1195 struct rb_root write_tree;
1196 struct blk_plug plug;
1197
1198 DECLARE_WAITQUEUE(wait, current);
1199
1200 spin_lock_irq(&cc->write_thread_wait.lock);
1201continue_locked:
1202
1203 if (!RB_EMPTY_ROOT(&cc->write_tree))
1204 goto pop_from_list;
1205
1206 __set_current_state(TASK_INTERRUPTIBLE);
1207 __add_wait_queue(&cc->write_thread_wait, &wait);
1208
1209 spin_unlock_irq(&cc->write_thread_wait.lock);
1210
1211 if (unlikely(kthread_should_stop())) {
1212 set_task_state(current, TASK_RUNNING);
1213 remove_wait_queue(&cc->write_thread_wait, &wait);
1214 break;
1215 }
1216
1217 schedule();
1218
1219 set_task_state(current, TASK_RUNNING);
1220 spin_lock_irq(&cc->write_thread_wait.lock);
1221 __remove_wait_queue(&cc->write_thread_wait, &wait);
1222 goto continue_locked;
1223
1224pop_from_list:
1225 write_tree = cc->write_tree;
1226 cc->write_tree = RB_ROOT;
1227 spin_unlock_irq(&cc->write_thread_wait.lock);
1228
1229 BUG_ON(rb_parent(write_tree.rb_node));
1230
1231
1232
1233
1234
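		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */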
1235 blk_start_plug(&plug);
1236 do {
1237 io = crypt_io_from_node(rb_first(&write_tree));
1238 rb_erase(&io->rb_node, &write_tree);
1239 kcryptd_io_write(io);
1240 } while (!RB_EMPTY_ROOT(&write_tree));
1241 blk_finish_plug(&plug);
1242 }
1243 return 0;
1244}
1245
1246static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1247{
1248 struct bio *clone = io->ctx.bio_out;
1249 struct crypt_config *cc = io->cc;
1250 unsigned long flags;
1251 sector_t sector;
1252 struct rb_node **rbp, *parent;
1253
1254 if (unlikely(io->error < 0)) {
1255 crypt_free_buffer_pages(cc, clone);
1256 bio_put(clone);
1257 crypt_dec_pending(io);
1258 return;
1259 }
1260
1261
1262 BUG_ON(io->ctx.iter_out.bi_size);
1263
1264 clone->bi_iter.bi_sector = cc->start + io->sector;
1265
1266 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1267 generic_make_request(clone);
1268 return;
1269 }
1270
1271 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1272 rbp = &cc->write_tree.rb_node;
1273 parent = NULL;
1274 sector = io->sector;
1275 while (*rbp) {
1276 parent = *rbp;
1277 if (sector < crypt_io_from_node(parent)->sector)
1278 rbp = &(*rbp)->rb_left;
1279 else
1280 rbp = &(*rbp)->rb_right;
1281 }
1282 rb_link_node(&io->rb_node, parent, rbp);
1283 rb_insert_color(&io->rb_node, &cc->write_tree);
1284
1285 wake_up_locked(&cc->write_thread_wait);
1286 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1287}
1288
1289static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1290{
1291 struct crypt_config *cc = io->cc;
1292 struct bio *clone;
1293 int crypt_finished;
1294 sector_t sector = io->sector;
1295 int r;
1296
1297
1298
1299
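	/*
	 * Prevent io from disappearing until this function completes.
	 */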
1300 crypt_inc_pending(io);
1301 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1302
1303 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1304 if (unlikely(!clone)) {
1305 io->error = -EIO;
1306 goto dec;
1307 }
1308
1309 io->ctx.bio_out = clone;
1310 io->ctx.iter_out = clone->bi_iter;
1311
1312 sector += bio_sectors(clone);
1313
1314 crypt_inc_pending(io);
1315 r = crypt_convert(cc, &io->ctx);
1316 if (r)
1317 io->error = -EIO;
1318 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1319
1320
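	/* Encryption was already finished, submit io now */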
1321 if (crypt_finished) {
1322 kcryptd_crypt_write_io_submit(io, 0);
1323 io->sector = sector;
1324 }
1325
1326dec:
1327 crypt_dec_pending(io);
1328}
1329
1330static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1331{
1332 crypt_dec_pending(io);
1333}
1334
1335static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1336{
1337 struct crypt_config *cc = io->cc;
1338 int r = 0;
1339
1340 crypt_inc_pending(io);
1341
1342 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1343 io->sector);
1344
1345 r = crypt_convert(cc, &io->ctx);
1346 if (r < 0)
1347 io->error = -EIO;
1348
1349 if (atomic_dec_and_test(&io->ctx.cc_pending))
1350 kcryptd_crypt_read_done(io);
1351
1352 crypt_dec_pending(io);
1353}
1354
1355static void kcryptd_async_done(struct crypto_async_request *async_req,
1356 int error)
1357{
1358 struct dm_crypt_request *dmreq = async_req->data;
1359 struct convert_context *ctx = dmreq->ctx;
1360 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1361 struct crypt_config *cc = io->cc;
1362
1363
1364
1365
1366
1367
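	/*
	 * A request from the crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (The callback will be called a second time for this request.)
	 */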
1368 if (error == -EINPROGRESS) {
1369 complete(&ctx->restart);
1370 return;
1371 }
1372
1373 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1374 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1375
1376 if (error < 0)
1377 io->error = -EIO;
1378
1379 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1380
1381 if (!atomic_dec_and_test(&ctx->cc_pending))
1382 return;
1383
1384 if (bio_data_dir(io->base_bio) == READ)
1385 kcryptd_crypt_read_done(io);
1386 else
1387 kcryptd_crypt_write_io_submit(io, 1);
1388}
1389
1390static void kcryptd_crypt(struct work_struct *work)
1391{
1392 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1393
1394 if (bio_data_dir(io->base_bio) == READ)
1395 kcryptd_crypt_read_convert(io);
1396 else
1397 kcryptd_crypt_write_convert(io);
1398}
1399
1400static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1401{
1402 struct crypt_config *cc = io->cc;
1403
1404 INIT_WORK(&io->work, kcryptd_crypt);
1405 queue_work(cc->crypt_queue, &io->work);
1406}
1407
1408
1409
1410
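/*
 * Decode key from its hex representation
 */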
1411static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1412{
1413 char buffer[3];
1414 unsigned int i;
1415
1416 buffer[2] = '\0';
1417
1418 for (i = 0; i < size; i++) {
1419 buffer[0] = *hex++;
1420 buffer[1] = *hex++;
1421
1422 if (kstrtou8(buffer, 16, &key[i]))
1423 return -EINVAL;
1424 }
1425
1426 if (*hex != '\0')
1427 return -EINVAL;
1428
1429 return 0;
1430}
1431
1432static void crypt_free_tfms(struct crypt_config *cc)
1433{
1434 unsigned i;
1435
1436 if (!cc->tfms)
1437 return;
1438
1439 for (i = 0; i < cc->tfms_count; i++)
1440 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1441 crypto_free_ablkcipher(cc->tfms[i]);
1442 cc->tfms[i] = NULL;
1443 }
1444
1445 kfree(cc->tfms);
1446 cc->tfms = NULL;
1447}
1448
1449static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1450{
1451 unsigned i;
1452 int err;
1453
1454 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
1455 GFP_KERNEL);
1456 if (!cc->tfms)
1457 return -ENOMEM;
1458
1459 for (i = 0; i < cc->tfms_count; i++) {
1460 cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
1461 if (IS_ERR(cc->tfms[i])) {
1462 err = PTR_ERR(cc->tfms[i]);
1463 crypt_free_tfms(cc);
1464 return err;
1465 }
1466 }
1467
1468 return 0;
1469}
1470
1471static int crypt_setkey_allcpus(struct crypt_config *cc)
1472{
1473 unsigned subkey_size;
1474 int err = 0, i, r;
1475
1476
1477 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1478
1479 for (i = 0; i < cc->tfms_count; i++) {
1480 r = crypto_ablkcipher_setkey(cc->tfms[i],
1481 cc->key + (i * subkey_size),
1482 subkey_size);
1483 if (r)
1484 err = r;
1485 }
1486
1487 return err;
1488}
1489
1490static int crypt_set_key(struct crypt_config *cc, char *key)
1491{
1492 int r = -EINVAL;
1493 int key_string_len = strlen(key);
1494
1495
1496 if (cc->key_size != (key_string_len >> 1))
1497 goto out;
1498
1499
1500 if (!cc->key_size && strcmp(key, "-"))
1501 goto out;
1502
1503 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
1504 goto out;
1505
1506 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1507
1508 r = crypt_setkey_allcpus(cc);
1509
1510out:
1511
1512 memset(key, '0', key_string_len);
1513
1514 return r;
1515}
1516
1517static int crypt_wipe_key(struct crypt_config *cc)
1518{
1519 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1520 memset(&cc->key, 0, cc->key_size * sizeof(u8));
1521
1522 return crypt_setkey_allcpus(cc);
1523}
1524
1525static void crypt_dtr(struct dm_target *ti)
1526{
1527 struct crypt_config *cc = ti->private;
1528
1529 ti->private = NULL;
1530
1531 if (!cc)
1532 return;
1533
1534 if (cc->write_thread)
1535 kthread_stop(cc->write_thread);
1536
1537 if (cc->io_queue)
1538 destroy_workqueue(cc->io_queue);
1539 if (cc->crypt_queue)
1540 destroy_workqueue(cc->crypt_queue);
1541
1542 crypt_free_tfms(cc);
1543
1544 if (cc->bs)
1545 bioset_free(cc->bs);
1546
1547 if (cc->page_pool)
1548 mempool_destroy(cc->page_pool);
1549 if (cc->req_pool)
1550 mempool_destroy(cc->req_pool);
1551
1552 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1553 cc->iv_gen_ops->dtr(cc);
1554
1555 if (cc->dev)
1556 dm_put_device(ti, cc->dev);
1557
1558 kzfree(cc->cipher);
1559 kzfree(cc->cipher_string);
1560
1561
1562 kzfree(cc);
1563}
1564
1565static int crypt_ctr_cipher(struct dm_target *ti,
1566 char *cipher_in, char *key)
1567{
1568 struct crypt_config *cc = ti->private;
1569 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
1570 char *cipher_api = NULL;
1571 int ret = -EINVAL;
1572 char dummy;
1573
1574
1575 if (strchr(cipher_in, '(')) {
1576 ti->error = "Bad cipher specification";
1577 return -EINVAL;
1578 }
1579
1580 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1581 if (!cc->cipher_string)
1582 goto bad_mem;
1583
1584
1585
1586
1587
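	/*
	 * Legacy dm-crypt cipher specification:
	 * cipher[:keycount]-mode-iv:ivopts
	 */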
1588 tmp = cipher_in;
1589 keycount = strsep(&tmp, "-");
1590 cipher = strsep(&keycount, ":");
1591
1592 if (!keycount)
1593 cc->tfms_count = 1;
1594 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
1595 !is_power_of_2(cc->tfms_count)) {
1596 ti->error = "Bad cipher key count specification";
1597 return -EINVAL;
1598 }
1599 cc->key_parts = cc->tfms_count;
1600 cc->key_extra_size = 0;
1601
1602 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1603 if (!cc->cipher)
1604 goto bad_mem;
1605
1606 chainmode = strsep(&tmp, "-");
1607 ivopts = strsep(&tmp, "-");
1608 ivmode = strsep(&ivopts, ":");
1609
1610 if (tmp)
1611 DMWARN("Ignoring unexpected additional cipher options");
1612
1613
1614
1615
1616
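	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */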
1617 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
1618 chainmode = "cbc";
1619 ivmode = "plain";
1620 }
1621
1622 if (strcmp(chainmode, "ecb") && !ivmode) {
1623 ti->error = "IV mechanism required";
1624 return -EINVAL;
1625 }
1626
1627 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1628 if (!cipher_api)
1629 goto bad_mem;
1630
1631 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1632 "%s(%s)", chainmode, cipher);
1633 if (ret < 0) {
1634 kfree(cipher_api);
1635 goto bad_mem;
1636 }
1637
1638
1639 ret = crypt_alloc_tfms(cc, cipher_api);
1640 if (ret < 0) {
1641 ti->error = "Error allocating crypto tfm";
1642 goto bad;
1643 }
1644
1645
1646 cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
1647 if (cc->iv_size)
1648
1649 cc->iv_size = max(cc->iv_size,
1650 (unsigned int)(sizeof(u64) / sizeof(u8)));
1651 else if (ivmode) {
1652 DMWARN("Selected cipher does not support IVs");
1653 ivmode = NULL;
1654 }
1655
1656
1657 if (ivmode == NULL)
1658 cc->iv_gen_ops = NULL;
1659 else if (strcmp(ivmode, "plain") == 0)
1660 cc->iv_gen_ops = &crypt_iv_plain_ops;
1661 else if (strcmp(ivmode, "plain64") == 0)
1662 cc->iv_gen_ops = &crypt_iv_plain64_ops;
1663 else if (strcmp(ivmode, "essiv") == 0)
1664 cc->iv_gen_ops = &crypt_iv_essiv_ops;
1665 else if (strcmp(ivmode, "benbi") == 0)
1666 cc->iv_gen_ops = &crypt_iv_benbi_ops;
1667 else if (strcmp(ivmode, "null") == 0)
1668 cc->iv_gen_ops = &crypt_iv_null_ops;
1669 else if (strcmp(ivmode, "lmk") == 0) {
1670 cc->iv_gen_ops = &crypt_iv_lmk_ops;
1671
1672
1673
1674
1675
1676
1677 if (cc->key_size % cc->key_parts) {
1678 cc->key_parts++;
1679 cc->key_extra_size = cc->key_size / cc->key_parts;
1680 }
1681 } else if (strcmp(ivmode, "tcw") == 0) {
1682 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1683 cc->key_parts += 2;
1684 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
1685 } else {
1686 ret = -EINVAL;
1687 ti->error = "Invalid IV mode";
1688 goto bad;
1689 }
1690
1691
1692 ret = crypt_set_key(cc, key);
1693 if (ret < 0) {
1694 ti->error = "Error decoding and setting key";
1695 goto bad;
1696 }
1697
1698
1699 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1700 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1701 if (ret < 0) {
1702 ti->error = "Error creating IV";
1703 goto bad;
1704 }
1705 }
1706
1707
1708 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1709 ret = cc->iv_gen_ops->init(cc);
1710 if (ret < 0) {
1711 ti->error = "Error initialising IV";
1712 goto bad;
1713 }
1714 }
1715
1716 ret = 0;
1717bad:
1718 kfree(cipher_api);
1719 return ret;
1720
1721bad_mem:
1722 ti->error = "Cannot allocate cipher strings";
1723 return -ENOMEM;
1724}
1725
1726
1727
1728
1729
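/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */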
1730static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1731{
1732 struct crypt_config *cc;
1733 unsigned int key_size, opt_params;
1734 unsigned long long tmpll;
1735 int ret;
1736 size_t iv_size_padding;
1737 struct dm_arg_set as;
1738 const char *opt_string;
1739 char dummy;
1740
1741 static struct dm_arg _args[] = {
1742 {0, 3, "Invalid number of feature args"},
1743 };
1744
1745 if (argc < 5) {
1746 ti->error = "Not enough arguments";
1747 return -EINVAL;
1748 }
1749
1750 key_size = strlen(argv[1]) >> 1;
1751
1752 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1753 if (!cc) {
1754 ti->error = "Cannot allocate encryption context";
1755 return -ENOMEM;
1756 }
1757 cc->key_size = key_size;
1758
1759 ti->private = cc;
1760 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1761 if (ret < 0)
1762 goto bad;
1763
1764 cc->dmreq_start = sizeof(struct ablkcipher_request);
1765 cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
1766 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
1767
1768 if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
1769
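		/* Allocate the padding exactly */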
1770 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
1771 & crypto_ablkcipher_alignmask(any_tfm(cc));
1772 } else {
1773
1774
1775
1776
1777
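		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */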
1778 iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
1779 }
1780
1781 ret = -ENOMEM;
1782 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1783 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
1784 if (!cc->req_pool) {
1785 ti->error = "Cannot allocate crypt request mempool";
1786 goto bad;
1787 }
1788
1789 cc->per_bio_data_size = ti->per_bio_data_size =
1790 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
1791 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1792 ARCH_KMALLOC_MINALIGN);
1793
1794 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1795 if (!cc->page_pool) {
1796 ti->error = "Cannot allocate page mempool";
1797 goto bad;
1798 }
1799
1800 cc->bs = bioset_create(MIN_IOS, 0);
1801 if (!cc->bs) {
1802 ti->error = "Cannot allocate crypt bioset";
1803 goto bad;
1804 }
1805
1806 mutex_init(&cc->bio_alloc_lock);
1807
1808 ret = -EINVAL;
1809 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1810 ti->error = "Invalid iv_offset sector";
1811 goto bad;
1812 }
1813 cc->iv_offset = tmpll;
1814
1815 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
1816 if (ret) {
1817 ti->error = "Device lookup failed";
1818 goto bad;
1819 }
1820
1821 ret = -EINVAL;
1822 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1823 ti->error = "Invalid device sector";
1824 goto bad;
1825 }
1826 cc->start = tmpll;
1827
1828 argv += 5;
1829 argc -= 5;
1830
1831
1832 if (argc) {
1833 as.argc = argc;
1834 as.argv = argv;
1835
1836 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1837 if (ret)
1838 goto bad;
1839
1840 ret = -EINVAL;
1841 while (opt_params--) {
1842 opt_string = dm_shift_arg(&as);
1843 if (!opt_string) {
1844 ti->error = "Not enough feature arguments";
1845 goto bad;
1846 }
1847
1848 if (!strcasecmp(opt_string, "allow_discards"))
1849 ti->num_discard_bios = 1;
1850
1851 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1852 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1853
1854 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
1855 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1856
1857 else {
1858 ti->error = "Invalid feature arguments";
1859 goto bad;
1860 }
1861 }
1862 }
1863
1864 ret = -ENOMEM;
1865 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
1866 if (!cc->io_queue) {
1867 ti->error = "Couldn't create kcryptd io queue";
1868 goto bad;
1869 }
1870
1871 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1872 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1873 else
1874 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1875 num_online_cpus());
1876 if (!cc->crypt_queue) {
1877 ti->error = "Couldn't create kcryptd queue";
1878 goto bad;
1879 }
1880
1881 init_waitqueue_head(&cc->write_thread_wait);
1882 cc->write_tree = RB_ROOT;
1883
1884 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
1885 if (IS_ERR(cc->write_thread)) {
1886 ret = PTR_ERR(cc->write_thread);
1887 cc->write_thread = NULL;
1888 ti->error = "Couldn't spawn write thread";
1889 goto bad;
1890 }
1891 wake_up_process(cc->write_thread);
1892
1893 ti->num_flush_bios = 1;
1894 ti->discard_zeroes_data_unsupported = true;
1895
1896 return 0;
1897
1898bad:
1899 crypt_dtr(ti);
1900 return ret;
1901}
1902
1903static int crypt_map(struct dm_target *ti, struct bio *bio)
1904{
1905 struct dm_crypt_io *io;
1906 struct crypt_config *cc = ti->private;
1907
1908
1909
1910
1911
1912
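	/*
	 * If the bio is REQ_FLUSH or REQ_DISCARD, just bypass the crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD the caller must use flush if IO ordering matters
	 */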
1913 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1914 bio->bi_bdev = cc->dev->bdev;
1915 if (bio_sectors(bio))
1916 bio->bi_iter.bi_sector = cc->start +
1917 dm_target_offset(ti, bio->bi_iter.bi_sector);
1918 return DM_MAPIO_REMAPPED;
1919 }
1920
1921 io = dm_per_bio_data(bio, cc->per_bio_data_size);
1922 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1923 io->ctx.req = (struct ablkcipher_request *)(io + 1);
1924
1925 if (bio_data_dir(io->base_bio) == READ) {
1926 if (kcryptd_io_read(io, GFP_NOWAIT))
1927 kcryptd_queue_read(io);
1928 } else
1929 kcryptd_queue_crypt(io);
1930
1931 return DM_MAPIO_SUBMITTED;
1932}
1933
1934static void crypt_status(struct dm_target *ti, status_type_t type,
1935 unsigned status_flags, char *result, unsigned maxlen)
1936{
1937 struct crypt_config *cc = ti->private;
1938 unsigned i, sz = 0;
1939 int num_feature_args = 0;
1940
1941 switch (type) {
1942 case STATUSTYPE_INFO:
1943 result[0] = '\0';
1944 break;
1945
1946 case STATUSTYPE_TABLE:
1947 DMEMIT("%s ", cc->cipher_string);
1948
1949 if (cc->key_size > 0)
1950 for (i = 0; i < cc->key_size; i++)
1951 DMEMIT("%02x", cc->key[i]);
1952 else
1953 DMEMIT("-");
1954
1955 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1956 cc->dev->name, (unsigned long long)cc->start);
1957
1958 num_feature_args += !!ti->num_discard_bios;
1959 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1960 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1961 if (num_feature_args) {
1962 DMEMIT(" %d", num_feature_args);
1963 if (ti->num_discard_bios)
1964 DMEMIT(" allow_discards");
1965 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1966 DMEMIT(" same_cpu_crypt");
1967 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
1968 DMEMIT(" submit_from_crypt_cpus");
1969 }
1970
1971 break;
1972 }
1973}
1974
1975static void crypt_postsuspend(struct dm_target *ti)
1976{
1977 struct crypt_config *cc = ti->private;
1978
1979 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1980}
1981
1982static int crypt_preresume(struct dm_target *ti)
1983{
1984 struct crypt_config *cc = ti->private;
1985
1986 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1987 DMERR("aborting resume - crypt key is not set.");
1988 return -EAGAIN;
1989 }
1990
1991 return 0;
1992}
1993
1994static void crypt_resume(struct dm_target *ti)
1995{
1996 struct crypt_config *cc = ti->private;
1997
1998 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1999}
2000
2001
2002
2003
2004
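/*
 * Message interface
 *	key set <key>
 *	key wipe
 */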
2005static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2006{
2007 struct crypt_config *cc = ti->private;
2008 int ret = -EINVAL;
2009
2010 if (argc < 2)
2011 goto error;
2012
2013 if (!strcasecmp(argv[0], "key")) {
2014 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
2015 DMWARN("not suspended during key manipulation.");
2016 return -EINVAL;
2017 }
2018 if (argc == 3 && !strcasecmp(argv[1], "set")) {
2019 ret = crypt_set_key(cc, argv[2]);
2020 if (ret)
2021 return ret;
2022 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2023 ret = cc->iv_gen_ops->init(cc);
2024 return ret;
2025 }
2026 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
2027 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2028 ret = cc->iv_gen_ops->wipe(cc);
2029 if (ret)
2030 return ret;
2031 }
2032 return crypt_wipe_key(cc);
2033 }
2034 }
2035
2036error:
2037 DMWARN("unrecognised message received.");
2038 return -EINVAL;
2039}
2040
2041static int crypt_iterate_devices(struct dm_target *ti,
2042 iterate_devices_callout_fn fn, void *data)
2043{
2044 struct crypt_config *cc = ti->private;
2045
2046 return fn(ti, cc->dev, cc->start, ti->len, data);
2047}
2048
2049static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2050{
2051
2052
2053
2054
2055
2056
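	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding the underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */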
2057 limits->max_segment_size = PAGE_SIZE;
2058}
2059
2060static struct target_type crypt_target = {
2061 .name = "crypt",
2062 .version = {1, 14, 1},
2063 .module = THIS_MODULE,
2064 .ctr = crypt_ctr,
2065 .dtr = crypt_dtr,
2066 .map = crypt_map,
2067 .status = crypt_status,
2068 .postsuspend = crypt_postsuspend,
2069 .preresume = crypt_preresume,
2070 .resume = crypt_resume,
2071 .message = crypt_message,
2072 .iterate_devices = crypt_iterate_devices,
2073 .io_hints = crypt_io_hints,
2074};
2075
2076static int __init dm_crypt_init(void)
2077{
2078 int r;
2079
2080 r = dm_register_target(&crypt_target);
2081 if (r < 0)
2082 DMERR("register failed %d", r);
2083
2084 return r;
2085}
2086
2087static void __exit dm_crypt_exit(void)
2088{
2089 dm_unregister_target(&crypt_target);
2090}
2091
2092module_init(dm_crypt_init);
2093module_exit(dm_crypt_exit);
2094
2095MODULE_AUTHOR("Jana Saout <jana@saout.de>");
2096MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2097MODULE_LICENSE("GPL");
2098