/*
 * dm-crypt: device-mapper target providing transparent encryption and
 * decryption of block device data using the kernel crypto API.
 *
 * This file is released under the GPL.
 */

10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/bio.h>
16#include <linux/blkdev.h>
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/crypto.h>
20#include <linux/workqueue.h>
21#include <linux/kthread.h>
22#include <linux/backing-dev.h>
23#include <linux/atomic.h>
24#include <linux/scatterlist.h>
25#include <linux/rbtree.h>
26#include <asm/page.h>
27#include <asm/unaligned.h>
28#include <crypto/hash.h>
29#include <crypto/md5.h>
30#include <crypto/algapi.h>
31#include <crypto/skcipher.h>
32
33#include <linux/device-mapper.h>
34
35#define DM_MSG_PREFIX "crypt"
36
/*
 * context holding the current state of a multi-part conversion
 */
40struct convert_context {
41 struct completion restart;
42 struct bio *bio_in;
43 struct bio *bio_out;
44 struct bvec_iter iter_in;
45 struct bvec_iter iter_out;
46 sector_t cc_sector;
47 atomic_t cc_pending;
48 struct skcipher_request *req;
49};
50
/*
 * per bio private data
 */
54struct dm_crypt_io {
55 struct crypt_config *cc;
56 struct bio *base_bio;
57 struct work_struct work;
58
59 struct convert_context ctx;
60
61 atomic_t io_pending;
62 int error;
63 sector_t sector;
64
65 struct rb_node rb_node;
66} CRYPTO_MINALIGN_ATTR;
67
68struct dm_crypt_request {
69 struct convert_context *ctx;
70 struct scatterlist sg_in;
71 struct scatterlist sg_out;
72 sector_t iv_sector;
73};
74
75struct crypt_config;
76
77struct crypt_iv_operations {
78 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
79 const char *opts);
80 void (*dtr)(struct crypt_config *cc);
81 int (*init)(struct crypt_config *cc);
82 int (*wipe)(struct crypt_config *cc);
83 int (*generator)(struct crypt_config *cc, u8 *iv,
84 struct dm_crypt_request *dmreq);
85 int (*post)(struct crypt_config *cc, u8 *iv,
86 struct dm_crypt_request *dmreq);
87};
88
89struct iv_essiv_private {
90 struct crypto_ahash *hash_tfm;
91 u8 *salt;
92};
93
94struct iv_benbi_private {
95 int shift;
96};
97
98#define LMK_SEED_SIZE 64
99struct iv_lmk_private {
100 struct crypto_shash *hash_tfm;
101 u8 *seed;
102};
103
104#define TCW_WHITENING_SIZE 16
105struct iv_tcw_private {
106 struct crypto_shash *crc32_tfm;
107 u8 *iv_seed;
108 u8 *whitening;
109};
110
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
115enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
116 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
117 DM_CRYPT_EXIT_THREAD};
118
/*
 * The fields in here must be read only after initialization.
 */
122struct crypt_config {
123 struct dm_dev *dev;
124 sector_t start;
125
	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
130 mempool_t *req_pool;
131 mempool_t *page_pool;
132 struct bio_set *bs;
133 struct mutex bio_alloc_lock;
134
135 struct workqueue_struct *io_queue;
136 struct workqueue_struct *crypt_queue;
137
138 struct task_struct *write_thread;
139 wait_queue_head_t write_thread_wait;
140 struct rb_root write_tree;
141
142 char *cipher;
143 char *cipher_string;
144
145 struct crypt_iv_operations *iv_gen_ops;
146 union {
147 struct iv_essiv_private essiv;
148 struct iv_benbi_private benbi;
149 struct iv_lmk_private lmk;
150 struct iv_tcw_private tcw;
151 } iv_gen_private;
152 sector_t iv_offset;
153 unsigned int iv_size;
154
	/* ESSIV: struct crypto_cipher *essiv_tfm */
156 void *iv_private;
157 struct crypto_skcipher **tfms;
158 unsigned tfms_count;
159
	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
173 unsigned int dmreq_start;
174
175 unsigned int per_bio_data_size;
176
177 unsigned long flags;
178 unsigned int key_size;
179 unsigned int key_parts;
180 unsigned int key_extra_size;
181 u8 key[0];
182};
183
184#define MIN_IOS 16
185
186static void clone_init(struct dm_crypt_io *, struct bio *);
187static void kcryptd_queue_crypt(struct dm_crypt_io *io);
188static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
189
/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
193static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
194{
195 return cc->tfms[0];
196}
197
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system.
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block
 *      of the sector must be tweaked according to the decrypted data.
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *      The first key is used for the block cipher, the whole sector is
 *      encrypted; additional key material is used for the IV seed and
 *      whitening.
 */
245static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
246 struct dm_crypt_request *dmreq)
247{
248 memset(iv, 0, cc->iv_size);
249 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
250
251 return 0;
252}
253
254static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
255 struct dm_crypt_request *dmreq)
256{
257 memset(iv, 0, cc->iv_size);
258 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
259
260 return 0;
261}
262
/* Initialise ESSIV - compute salt but no local memory allocations */
264static int crypt_iv_essiv_init(struct crypt_config *cc)
265{
266 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
267 AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
268 struct scatterlist sg;
269 struct crypto_cipher *essiv_tfm;
270 int err;
271
272 sg_init_one(&sg, cc->key, cc->key_size);
273 ahash_request_set_tfm(req, essiv->hash_tfm);
274 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
275 ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
276
277 err = crypto_ahash_digest(req);
278 ahash_request_zero(req);
279 if (err)
280 return err;
281
282 essiv_tfm = cc->iv_private;
283
284 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
285 crypto_ahash_digestsize(essiv->hash_tfm));
286 if (err)
287 return err;
288
289 return 0;
290}
291
/* Wipe salt and reset key derived from volume key */
293static int crypt_iv_essiv_wipe(struct crypt_config *cc)
294{
295 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
296 unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
297 struct crypto_cipher *essiv_tfm;
298 int r, err = 0;
299
300 memset(essiv->salt, 0, salt_size);
301
302 essiv_tfm = cc->iv_private;
303 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
304 if (r)
305 err = r;
306
307 return err;
308}
309
310
311static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
312 struct dm_target *ti,
313 u8 *salt, unsigned saltsize)
314{
315 struct crypto_cipher *essiv_tfm;
316 int err;
317
	/* Set up the essiv_tfm with the given salt as key */
319 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
320 if (IS_ERR(essiv_tfm)) {
321 ti->error = "Error allocating crypto tfm for ESSIV";
322 return essiv_tfm;
323 }
324
325 if (crypto_cipher_blocksize(essiv_tfm) !=
326 crypto_skcipher_ivsize(any_tfm(cc))) {
327 ti->error = "Block size of ESSIV cipher does "
328 "not match IV size of block cipher";
329 crypto_free_cipher(essiv_tfm);
330 return ERR_PTR(-EINVAL);
331 }
332
333 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
334 if (err) {
335 ti->error = "Failed to set key for ESSIV cipher";
336 crypto_free_cipher(essiv_tfm);
337 return ERR_PTR(err);
338 }
339
340 return essiv_tfm;
341}
342
343static void crypt_iv_essiv_dtr(struct crypt_config *cc)
344{
345 struct crypto_cipher *essiv_tfm;
346 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
347
348 crypto_free_ahash(essiv->hash_tfm);
349 essiv->hash_tfm = NULL;
350
351 kzfree(essiv->salt);
352 essiv->salt = NULL;
353
354 essiv_tfm = cc->iv_private;
355
356 if (essiv_tfm)
357 crypto_free_cipher(essiv_tfm);
358
359 cc->iv_private = NULL;
360}
361
362static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
363 const char *opts)
364{
365 struct crypto_cipher *essiv_tfm = NULL;
366 struct crypto_ahash *hash_tfm = NULL;
367 u8 *salt = NULL;
368 int err;
369
370 if (!opts) {
371 ti->error = "Digest algorithm missing for ESSIV mode";
372 return -EINVAL;
373 }
374
375
376 hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
377 if (IS_ERR(hash_tfm)) {
378 ti->error = "Error initializing ESSIV hash";
379 err = PTR_ERR(hash_tfm);
380 goto bad;
381 }
382
383 salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
384 if (!salt) {
385 ti->error = "Error kmallocing salt storage in ESSIV";
386 err = -ENOMEM;
387 goto bad;
388 }
389
390 cc->iv_gen_private.essiv.salt = salt;
391 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
392
393 essiv_tfm = setup_essiv_cpu(cc, ti, salt,
394 crypto_ahash_digestsize(hash_tfm));
395 if (IS_ERR(essiv_tfm)) {
396 crypt_iv_essiv_dtr(cc);
397 return PTR_ERR(essiv_tfm);
398 }
399 cc->iv_private = essiv_tfm;
400
401 return 0;
402
403bad:
404 if (hash_tfm && !IS_ERR(hash_tfm))
405 crypto_free_ahash(hash_tfm);
406 kfree(salt);
407 return err;
408}
409
410static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
411 struct dm_crypt_request *dmreq)
412{
413 struct crypto_cipher *essiv_tfm = cc->iv_private;
414
415 memset(iv, 0, cc->iv_size);
416 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
417 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
418
419 return 0;
420}
421
422static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
423 const char *opts)
424{
425 unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
426 int log = ilog2(bs);
427
	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */
431 if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
433 return -EINVAL;
434 }
435
436 if (log > 9) {
		ti->error = "cipher blocksize is > 512";
438 return -EINVAL;
439 }
440
441 cc->iv_gen_private.benbi.shift = 9 - log;
442
443 return 0;
444}
445
446static void crypt_iv_benbi_dtr(struct crypt_config *cc)
447{
448}
449
450static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
451 struct dm_crypt_request *dmreq)
452{
453 __be64 val;
454
455 memset(iv, 0, cc->iv_size - sizeof(u64));
456
457 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
458 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
459
460 return 0;
461}
462
463static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
464 struct dm_crypt_request *dmreq)
465{
466 memset(iv, 0, cc->iv_size);
467
468 return 0;
469}
470
471static void crypt_iv_lmk_dtr(struct crypt_config *cc)
472{
473 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
474
475 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
476 crypto_free_shash(lmk->hash_tfm);
477 lmk->hash_tfm = NULL;
478
479 kzfree(lmk->seed);
480 lmk->seed = NULL;
481}
482
483static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
484 const char *opts)
485{
486 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
487
488 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
489 if (IS_ERR(lmk->hash_tfm)) {
490 ti->error = "Error initializing LMK hash";
491 return PTR_ERR(lmk->hash_tfm);
492 }
493
	/* No seed in LMK version 2 */
495 if (cc->key_parts == cc->tfms_count) {
496 lmk->seed = NULL;
497 return 0;
498 }
499
500 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
501 if (!lmk->seed) {
502 crypt_iv_lmk_dtr(cc);
503 ti->error = "Error kmallocing seed storage in LMK";
504 return -ENOMEM;
505 }
506
507 return 0;
508}
509
510static int crypt_iv_lmk_init(struct crypt_config *cc)
511{
512 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
513 int subkey_size = cc->key_size / cc->key_parts;
514
	/* LMK seed is on the position of LMK_KEYS + 1 key */
516 if (lmk->seed)
517 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
518 crypto_shash_digestsize(lmk->hash_tfm));
519
520 return 0;
521}
522
523static int crypt_iv_lmk_wipe(struct crypt_config *cc)
524{
525 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
526
527 if (lmk->seed)
528 memset(lmk->seed, 0, LMK_SEED_SIZE);
529
530 return 0;
531}
532
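/*
 * Compute one LMK IV: MD5 over the optional seed, bytes 16-511 of the
 * sector data and the encoded sector number (the MD5 state is exported
 * without final padding).
 */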
533static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
534 struct dm_crypt_request *dmreq,
535 u8 *data)
536{
537 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
538 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
539 struct md5_state md5state;
540 __le32 buf[4];
541 int i, r;
542
543 desc->tfm = lmk->hash_tfm;
544 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
545
546 r = crypto_shash_init(desc);
547 if (r)
548 return r;
549
550 if (lmk->seed) {
551 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
552 if (r)
553 return r;
554 }
555
	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
557 r = crypto_shash_update(desc, data + 16, 16 * 31);
558 if (r)
559 return r;
560
	/* Sector is cropped to 56 bits here */
562 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
563 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
564 buf[2] = cpu_to_le32(4024);
565 buf[3] = 0;
566 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
567 if (r)
568 return r;
569
	/* No MD5 padding here */
571 r = crypto_shash_export(desc, &md5state);
572 if (r)
573 return r;
574
575 for (i = 0; i < MD5_HASH_WORDS; i++)
576 __cpu_to_le32s(&md5state.hash[i]);
577 memcpy(iv, &md5state.hash, cc->iv_size);
578
579 return 0;
580}
581
582static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
583 struct dm_crypt_request *dmreq)
584{
585 u8 *src;
586 int r = 0;
587
588 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
589 src = kmap_atomic(sg_page(&dmreq->sg_in));
590 r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
591 kunmap_atomic(src);
592 } else
593 memset(iv, 0, cc->iv_size);
594
595 return r;
596}
597
598static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
599 struct dm_crypt_request *dmreq)
600{
601 u8 *dst;
602 int r;
603
604 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
605 return 0;
606
607 dst = kmap_atomic(sg_page(&dmreq->sg_out));
608 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
609
	/* Tweak the first block of plaintext sector */
611 if (!r)
612 crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
613
614 kunmap_atomic(dst);
615 return r;
616}
617
618static void crypt_iv_tcw_dtr(struct crypt_config *cc)
619{
620 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
621
622 kzfree(tcw->iv_seed);
623 tcw->iv_seed = NULL;
624 kzfree(tcw->whitening);
625 tcw->whitening = NULL;
626
627 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
628 crypto_free_shash(tcw->crc32_tfm);
629 tcw->crc32_tfm = NULL;
630}
631
632static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
633 const char *opts)
634{
635 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
636
637 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
638 ti->error = "Wrong key size for TCW";
639 return -EINVAL;
640 }
641
642 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
643 if (IS_ERR(tcw->crc32_tfm)) {
644 ti->error = "Error initializing CRC32 in TCW";
645 return PTR_ERR(tcw->crc32_tfm);
646 }
647
648 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
649 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
650 if (!tcw->iv_seed || !tcw->whitening) {
651 crypt_iv_tcw_dtr(cc);
652 ti->error = "Error allocating seed storage in TCW";
653 return -ENOMEM;
654 }
655
656 return 0;
657}
658
659static int crypt_iv_tcw_init(struct crypt_config *cc)
660{
661 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
662 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
663
664 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
665 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
666 TCW_WHITENING_SIZE);
667
668 return 0;
669}
670
671static int crypt_iv_tcw_wipe(struct crypt_config *cc)
672{
673 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
674
675 memset(tcw->iv_seed, 0, cc->iv_size);
676 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
677
678 return 0;
679}
680
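/*
 * Apply (or remove) TCW whitening on one 512 byte sector: the whitening
 * seed is mixed with the sector number, folded through CRC32 and the
 * resulting 8 bytes are XORed over the whole sector.
 */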
681static int crypt_iv_tcw_whitening(struct crypt_config *cc,
682 struct dm_crypt_request *dmreq,
683 u8 *data)
684{
685 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
686 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
687 u8 buf[TCW_WHITENING_SIZE];
688 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
689 int i, r;
690
	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);
695
	/* calculate crc32 for every 32bit part and xor it */
697 desc->tfm = tcw->crc32_tfm;
698 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
699 for (i = 0; i < 4; i++) {
700 r = crypto_shash_init(desc);
701 if (r)
702 goto out;
703 r = crypto_shash_update(desc, &buf[i * 4], 4);
704 if (r)
705 goto out;
706 r = crypto_shash_final(desc, &buf[i * 4]);
707 if (r)
708 goto out;
709 }
710 crypto_xor(&buf[0], &buf[12], 4);
711 crypto_xor(&buf[4], &buf[8], 4);
712
	/* apply whitening (8 bytes) to whole sector */
714 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
715 crypto_xor(data + i * 8, buf, 8);
716out:
717 memzero_explicit(buf, sizeof(buf));
718 return r;
719}
720
721static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
722 struct dm_crypt_request *dmreq)
723{
724 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
725 u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
726 u8 *src;
727 int r = 0;
728
	/* Remove whitening from ciphertext */
730 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
731 src = kmap_atomic(sg_page(&dmreq->sg_in));
732 r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset);
733 kunmap_atomic(src);
734 }
735
	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
741
742 return r;
743}
744
745static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
746 struct dm_crypt_request *dmreq)
747{
748 u8 *dst;
749 int r;
750
751 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
752 return 0;
753
	/* Apply whitening on ciphertext */
755 dst = kmap_atomic(sg_page(&dmreq->sg_out));
756 r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset);
757 kunmap_atomic(dst);
758
759 return r;
760}
761
762static struct crypt_iv_operations crypt_iv_plain_ops = {
763 .generator = crypt_iv_plain_gen
764};
765
766static struct crypt_iv_operations crypt_iv_plain64_ops = {
767 .generator = crypt_iv_plain64_gen
768};
769
770static struct crypt_iv_operations crypt_iv_essiv_ops = {
771 .ctr = crypt_iv_essiv_ctr,
772 .dtr = crypt_iv_essiv_dtr,
773 .init = crypt_iv_essiv_init,
774 .wipe = crypt_iv_essiv_wipe,
775 .generator = crypt_iv_essiv_gen
776};
777
778static struct crypt_iv_operations crypt_iv_benbi_ops = {
779 .ctr = crypt_iv_benbi_ctr,
780 .dtr = crypt_iv_benbi_dtr,
781 .generator = crypt_iv_benbi_gen
782};
783
784static struct crypt_iv_operations crypt_iv_null_ops = {
785 .generator = crypt_iv_null_gen
786};
787
788static struct crypt_iv_operations crypt_iv_lmk_ops = {
789 .ctr = crypt_iv_lmk_ctr,
790 .dtr = crypt_iv_lmk_dtr,
791 .init = crypt_iv_lmk_init,
792 .wipe = crypt_iv_lmk_wipe,
793 .generator = crypt_iv_lmk_gen,
794 .post = crypt_iv_lmk_post
795};
796
797static struct crypt_iv_operations crypt_iv_tcw_ops = {
798 .ctr = crypt_iv_tcw_ctr,
799 .dtr = crypt_iv_tcw_dtr,
800 .init = crypt_iv_tcw_init,
801 .wipe = crypt_iv_tcw_wipe,
802 .generator = crypt_iv_tcw_gen,
803 .post = crypt_iv_tcw_post
804};
805
806static void crypt_convert_init(struct crypt_config *cc,
807 struct convert_context *ctx,
808 struct bio *bio_out, struct bio *bio_in,
809 sector_t sector)
810{
811 ctx->bio_in = bio_in;
812 ctx->bio_out = bio_out;
813 if (bio_in)
814 ctx->iter_in = bio_in->bi_iter;
815 if (bio_out)
816 ctx->iter_out = bio_out->bi_iter;
817 ctx->cc_sector = sector + cc->iv_offset;
818 init_completion(&ctx->restart);
819}
820
821static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
822 struct skcipher_request *req)
823{
824 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
825}
826
827static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
828 struct dm_crypt_request *dmreq)
829{
830 return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
831}
832
833static u8 *iv_of_dmreq(struct crypt_config *cc,
834 struct dm_crypt_request *dmreq)
835{
836 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
837 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
838}
839
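/*
 * Encrypt or decrypt one 512 byte sector, mapping it through the
 * per-request scatterlists and the IV generated for this sector.
 */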
840static int crypt_convert_block(struct crypt_config *cc,
841 struct convert_context *ctx,
842 struct skcipher_request *req)
843{
844 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
845 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
846 struct dm_crypt_request *dmreq;
847 u8 *iv;
848 int r;
849
850 dmreq = dmreq_of_req(cc, req);
851 iv = iv_of_dmreq(cc, dmreq);
852
853 dmreq->iv_sector = ctx->cc_sector;
854 dmreq->ctx = ctx;
855 sg_init_table(&dmreq->sg_in, 1);
856 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
857 bv_in.bv_offset);
858
859 sg_init_table(&dmreq->sg_out, 1);
860 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
861 bv_out.bv_offset);
862
863 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
864 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
865
866 if (cc->iv_gen_ops) {
867 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
868 if (r < 0)
869 return r;
870 }
871
872 skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
873 1 << SECTOR_SHIFT, iv);
874
875 if (bio_data_dir(ctx->bio_in) == WRITE)
876 r = crypto_skcipher_encrypt(req);
877 else
878 r = crypto_skcipher_decrypt(req);
879
880 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
881 r = cc->iv_gen_ops->post(cc, iv, dmreq);
882
883 return r;
884}
885
886static void kcryptd_async_done(struct crypto_async_request *async_req,
887 int error);
888
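/*
 * Allocate (or reuse) a crypto request from the mempool and attach the
 * tfm for this sector and the asynchronous completion callback.
 */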
889static void crypt_alloc_req(struct crypt_config *cc,
890 struct convert_context *ctx)
891{
892 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
893
894 if (!ctx->req)
895 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
896
897 skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
898
	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
903 skcipher_request_set_callback(ctx->req,
904 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
905 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
906}
907
908static void crypt_free_req(struct crypt_config *cc,
909 struct skcipher_request *req, struct bio *base_bio)
910{
911 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
912
913 if ((struct skcipher_request *)(io + 1) != req)
914 mempool_free(req, cc->req_pool);
915}
916
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
920static int crypt_convert(struct crypt_config *cc,
921 struct convert_context *ctx)
922{
923 int r;
924
925 atomic_set(&ctx->cc_pending, 1);
926
927 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
928
929 crypt_alloc_req(cc, ctx);
930
931 atomic_inc(&ctx->cc_pending);
932
933 r = crypt_convert_block(cc, ctx, ctx->req);
934
935 switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
940 case -EBUSY:
941 wait_for_completion(&ctx->restart);
942 reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
948 case -EINPROGRESS:
949 ctx->req = NULL;
950 ctx->cc_sector++;
951 continue;
		/*
		 * The request was already processed (synchronously).
		 */
955 case 0:
956 atomic_dec(&ctx->cc_pending);
957 ctx->cc_sector++;
958 cond_resched();
959 continue;
960
		/* There was an error while processing the request. */
962 default:
963 atomic_dec(&ctx->cc_pending);
964 return r;
965 }
966 }
967
968 return 0;
969}
970
971static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
972
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes each wanting 256 pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */
990static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
991{
992 struct crypt_config *cc = io->cc;
993 struct bio *clone;
994 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
995 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
996 unsigned i, len, remaining_size;
997 struct page *page;
998 struct bio_vec *bvec;
999
1000retry:
1001 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1002 mutex_lock(&cc->bio_alloc_lock);
1003
1004 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
1005 if (!clone)
1006 goto return_clone;
1007
1008 clone_init(io, clone);
1009
1010 remaining_size = size;
1011
1012 for (i = 0; i < nr_iovecs; i++) {
1013 page = mempool_alloc(cc->page_pool, gfp_mask);
1014 if (!page) {
1015 crypt_free_buffer_pages(cc, clone);
1016 bio_put(clone);
1017 gfp_mask |= __GFP_DIRECT_RECLAIM;
1018 goto retry;
1019 }
1020
1021 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1022
1023 bvec = &clone->bi_io_vec[clone->bi_vcnt++];
1024 bvec->bv_page = page;
1025 bvec->bv_len = len;
1026 bvec->bv_offset = 0;
1027
1028 clone->bi_iter.bi_size += len;
1029
1030 remaining_size -= len;
1031 }
1032
1033return_clone:
1034 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1035 mutex_unlock(&cc->bio_alloc_lock);
1036
1037 return clone;
1038}
1039
1040static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1041{
1042 unsigned int i;
1043 struct bio_vec *bv;
1044
1045 bio_for_each_segment_all(bv, clone, i) {
1046 BUG_ON(!bv->bv_page);
1047 mempool_free(bv->bv_page, cc->page_pool);
1048 bv->bv_page = NULL;
1049 }
1050}
1051
1052static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1053 struct bio *bio, sector_t sector)
1054{
1055 io->cc = cc;
1056 io->base_bio = bio;
1057 io->sector = sector;
1058 io->error = 0;
1059 io->ctx.req = NULL;
1060 atomic_set(&io->io_pending, 0);
1061}
1062
1063static void crypt_inc_pending(struct dm_crypt_io *io)
1064{
1065 atomic_inc(&io->io_pending);
1066}
1067
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
1072static void crypt_dec_pending(struct dm_crypt_io *io)
1073{
1074 struct crypt_config *cc = io->cc;
1075 struct bio *base_bio = io->base_bio;
1076 int error = io->error;
1077
1078 if (!atomic_dec_and_test(&io->io_pending))
1079 return;
1080
1081 if (io->ctx.req)
1082 crypt_free_req(cc, io->ctx.req, base_bio);
1083
1084 base_bio->bi_error = error;
1085 bio_endio(base_bio);
1086}
1087
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
1105static void crypt_endio(struct bio *clone)
1106{
1107 struct dm_crypt_io *io = clone->bi_private;
1108 struct crypt_config *cc = io->cc;
1109 unsigned rw = bio_data_dir(clone);
1110 int error;
1111
	/*
	 * free the processed pages
	 */
1115 if (rw == WRITE)
1116 crypt_free_buffer_pages(cc, clone);
1117
1118 error = clone->bi_error;
1119 bio_put(clone);
1120
1121 if (rw == READ && !error) {
1122 kcryptd_queue_crypt(io);
1123 return;
1124 }
1125
1126 if (unlikely(error))
1127 io->error = error;
1128
1129 crypt_dec_pending(io);
1130}
1131
1132static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1133{
1134 struct crypt_config *cc = io->cc;
1135
1136 clone->bi_private = io;
1137 clone->bi_end_io = crypt_endio;
1138 clone->bi_bdev = cc->dev->bdev;
1139 clone->bi_rw = io->base_bio->bi_rw;
1140}
1141
1142static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1143{
1144 struct crypt_config *cc = io->cc;
1145 struct bio *clone;
1146
	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
1153 clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
1154 if (!clone)
1155 return 1;
1156
1157 crypt_inc_pending(io);
1158
1159 clone_init(io, clone);
1160 clone->bi_iter.bi_sector = cc->start + io->sector;
1161
1162 generic_make_request(clone);
1163 return 0;
1164}
1165
1166static void kcryptd_io_read_work(struct work_struct *work)
1167{
1168 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1169
1170 crypt_inc_pending(io);
1171 if (kcryptd_io_read(io, GFP_NOIO))
1172 io->error = -ENOMEM;
1173 crypt_dec_pending(io);
1174}
1175
1176static void kcryptd_queue_read(struct dm_crypt_io *io)
1177{
1178 struct crypt_config *cc = io->cc;
1179
1180 INIT_WORK(&io->work, kcryptd_io_read_work);
1181 queue_work(cc->io_queue, &io->work);
1182}
1183
1184static void kcryptd_io_write(struct dm_crypt_io *io)
1185{
1186 struct bio *clone = io->ctx.bio_out;
1187
1188 generic_make_request(clone);
1189}
1190
1191#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1192
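/*
 * Dedicated per-device thread: dequeues encrypted writes collected in
 * cc->write_tree and submits them in sector order under a block plug.
 */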
1193static int dmcrypt_write(void *data)
1194{
1195 struct crypt_config *cc = data;
1196 struct dm_crypt_io *io;
1197
1198 while (1) {
1199 struct rb_root write_tree;
1200 struct blk_plug plug;
1201
1202 DECLARE_WAITQUEUE(wait, current);
1203
1204 spin_lock_irq(&cc->write_thread_wait.lock);
1205continue_locked:
1206
1207 if (!RB_EMPTY_ROOT(&cc->write_tree))
1208 goto pop_from_list;
1209
1210 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1211 spin_unlock_irq(&cc->write_thread_wait.lock);
1212 break;
1213 }
1214
1215 __set_current_state(TASK_INTERRUPTIBLE);
1216 __add_wait_queue(&cc->write_thread_wait, &wait);
1217
1218 spin_unlock_irq(&cc->write_thread_wait.lock);
1219
1220 schedule();
1221
1222 spin_lock_irq(&cc->write_thread_wait.lock);
1223 __remove_wait_queue(&cc->write_thread_wait, &wait);
1224 goto continue_locked;
1225
1226pop_from_list:
1227 write_tree = cc->write_tree;
1228 cc->write_tree = RB_ROOT;
1229 spin_unlock_irq(&cc->write_thread_wait.lock);
1230
1231 BUG_ON(rb_parent(write_tree.rb_node));
1232
1233
1234
1235
1236
1237 blk_start_plug(&plug);
1238 do {
1239 io = crypt_io_from_node(rb_first(&write_tree));
1240 rb_erase(&io->rb_node, &write_tree);
1241 kcryptd_io_write(io);
1242 } while (!RB_EMPTY_ROOT(&write_tree));
1243 blk_finish_plug(&plug);
1244 }
1245 return 0;
1246}
1247
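/*
 * Queue an encrypted write for submission: either issue it directly
 * (synchronous path with DM_CRYPT_NO_OFFLOAD set) or insert it into the
 * sector-sorted rb-tree and wake the dmcrypt_write thread.
 */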
1248static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1249{
1250 struct bio *clone = io->ctx.bio_out;
1251 struct crypt_config *cc = io->cc;
1252 unsigned long flags;
1253 sector_t sector;
1254 struct rb_node **rbp, *parent;
1255
1256 if (unlikely(io->error < 0)) {
1257 crypt_free_buffer_pages(cc, clone);
1258 bio_put(clone);
1259 crypt_dec_pending(io);
1260 return;
1261 }
1262
	/* crypt_convert should have filled the clone bio */
1264 BUG_ON(io->ctx.iter_out.bi_size);
1265
1266 clone->bi_iter.bi_sector = cc->start + io->sector;
1267
1268 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1269 generic_make_request(clone);
1270 return;
1271 }
1272
1273 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1274 rbp = &cc->write_tree.rb_node;
1275 parent = NULL;
1276 sector = io->sector;
1277 while (*rbp) {
1278 parent = *rbp;
1279 if (sector < crypt_io_from_node(parent)->sector)
1280 rbp = &(*rbp)->rb_left;
1281 else
1282 rbp = &(*rbp)->rb_right;
1283 }
1284 rb_link_node(&io->rb_node, parent, rbp);
1285 rb_insert_color(&io->rb_node, &cc->write_tree);
1286
1287 wake_up_locked(&cc->write_thread_wait);
1288 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1289}
1290
1291static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1292{
1293 struct crypt_config *cc = io->cc;
1294 struct bio *clone;
1295 int crypt_finished;
1296 sector_t sector = io->sector;
1297 int r;
1298
	/*
	 * Prevent io from disappearing until this function completes.
	 */
1302 crypt_inc_pending(io);
1303 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1304
1305 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1306 if (unlikely(!clone)) {
1307 io->error = -EIO;
1308 goto dec;
1309 }
1310
1311 io->ctx.bio_out = clone;
1312 io->ctx.iter_out = clone->bi_iter;
1313
1314 sector += bio_sectors(clone);
1315
1316 crypt_inc_pending(io);
1317 r = crypt_convert(cc, &io->ctx);
1318 if (r)
1319 io->error = -EIO;
1320 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1321
	/* Encryption was already finished, submit io now */
1323 if (crypt_finished) {
1324 kcryptd_crypt_write_io_submit(io, 0);
1325 io->sector = sector;
1326 }
1327
1328dec:
1329 crypt_dec_pending(io);
1330}
1331
1332static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1333{
1334 crypt_dec_pending(io);
1335}
1336
1337static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1338{
1339 struct crypt_config *cc = io->cc;
1340 int r = 0;
1341
1342 crypt_inc_pending(io);
1343
1344 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1345 io->sector);
1346
1347 r = crypt_convert(cc, &io->ctx);
1348 if (r < 0)
1349 io->error = -EIO;
1350
1351 if (atomic_dec_and_test(&io->ctx.cc_pending))
1352 kcryptd_crypt_read_done(io);
1353
1354 crypt_dec_pending(io);
1355}
1356
1357static void kcryptd_async_done(struct crypto_async_request *async_req,
1358 int error)
1359{
1360 struct dm_crypt_request *dmreq = async_req->data;
1361 struct convert_context *ctx = dmreq->ctx;
1362 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1363 struct crypt_config *cc = io->cc;
1364
	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
1370 if (error == -EINPROGRESS) {
1371 complete(&ctx->restart);
1372 return;
1373 }
1374
1375 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1376 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
1377
1378 if (error < 0)
1379 io->error = -EIO;
1380
1381 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1382
1383 if (!atomic_dec_and_test(&ctx->cc_pending))
1384 return;
1385
1386 if (bio_data_dir(io->base_bio) == READ)
1387 kcryptd_crypt_read_done(io);
1388 else
1389 kcryptd_crypt_write_io_submit(io, 1);
1390}
1391
1392static void kcryptd_crypt(struct work_struct *work)
1393{
1394 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1395
1396 if (bio_data_dir(io->base_bio) == READ)
1397 kcryptd_crypt_read_convert(io);
1398 else
1399 kcryptd_crypt_write_convert(io);
1400}
1401
1402static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1403{
1404 struct crypt_config *cc = io->cc;
1405
1406 INIT_WORK(&io->work, kcryptd_crypt);
1407 queue_work(cc->crypt_queue, &io->work);
1408}
1409
/*
 * Decode key from its hex representation
 */
1413static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
1414{
1415 char buffer[3];
1416 unsigned int i;
1417
1418 buffer[2] = '\0';
1419
1420 for (i = 0; i < size; i++) {
1421 buffer[0] = *hex++;
1422 buffer[1] = *hex++;
1423
1424 if (kstrtou8(buffer, 16, &key[i]))
1425 return -EINVAL;
1426 }
1427
1428 if (*hex != '\0')
1429 return -EINVAL;
1430
1431 return 0;
1432}
1433
1434static void crypt_free_tfms(struct crypt_config *cc)
1435{
1436 unsigned i;
1437
1438 if (!cc->tfms)
1439 return;
1440
1441 for (i = 0; i < cc->tfms_count; i++)
1442 if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
1443 crypto_free_skcipher(cc->tfms[i]);
1444 cc->tfms[i] = NULL;
1445 }
1446
1447 kfree(cc->tfms);
1448 cc->tfms = NULL;
1449}
1450
1451static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1452{
1453 unsigned i;
1454 int err;
1455
1456 cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
1457 GFP_KERNEL);
1458 if (!cc->tfms)
1459 return -ENOMEM;
1460
1461 for (i = 0; i < cc->tfms_count; i++) {
1462 cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
1463 if (IS_ERR(cc->tfms[i])) {
1464 err = PTR_ERR(cc->tfms[i]);
1465 crypt_free_tfms(cc);
1466 return err;
1467 }
1468 }
1469
1470 return 0;
1471}
1472
1473static int crypt_setkey_allcpus(struct crypt_config *cc)
1474{
1475 unsigned subkey_size;
1476 int err = 0, i, r;
1477
	/* Ignore extra keys (which are used for IV etc) */
1479 subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1480
1481 for (i = 0; i < cc->tfms_count; i++) {
1482 r = crypto_skcipher_setkey(cc->tfms[i],
1483 cc->key + (i * subkey_size),
1484 subkey_size);
1485 if (r)
1486 err = r;
1487 }
1488
1489 return err;
1490}
1491
1492static int crypt_set_key(struct crypt_config *cc, char *key)
1493{
1494 int r = -EINVAL;
1495 int key_string_len = strlen(key);
1496
	/* The key size may not be changed. */
1498 if (cc->key_size != (key_string_len >> 1))
1499 goto out;
1500
	/* Hyphen (which gives a key_size of zero) means there is no key. */
1502 if (!cc->key_size && strcmp(key, "-"))
1503 goto out;
1504
1505 if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
1506 goto out;
1507
1508 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1509
1510 r = crypt_setkey_allcpus(cc);
1511
1512out:
	/* Hex key string is not needed after here, so wipe it. */
1514 memset(key, '0', key_string_len);
1515
1516 return r;
1517}
1518
1519static int crypt_wipe_key(struct crypt_config *cc)
1520{
1521 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
1522 memset(&cc->key, 0, cc->key_size * sizeof(u8));
1523
1524 return crypt_setkey_allcpus(cc);
1525}
1526
1527static void crypt_dtr(struct dm_target *ti)
1528{
1529 struct crypt_config *cc = ti->private;
1530
1531 ti->private = NULL;
1532
1533 if (!cc)
1534 return;
1535
1536 if (cc->write_thread) {
1537 spin_lock_irq(&cc->write_thread_wait.lock);
1538 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1539 wake_up_locked(&cc->write_thread_wait);
1540 spin_unlock_irq(&cc->write_thread_wait.lock);
1541 kthread_stop(cc->write_thread);
1542 }
1543
1544 if (cc->io_queue)
1545 destroy_workqueue(cc->io_queue);
1546 if (cc->crypt_queue)
1547 destroy_workqueue(cc->crypt_queue);
1548
1549 crypt_free_tfms(cc);
1550
1551 if (cc->bs)
1552 bioset_free(cc->bs);
1553
1554 mempool_destroy(cc->page_pool);
1555 mempool_destroy(cc->req_pool);
1556
1557 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
1558 cc->iv_gen_ops->dtr(cc);
1559
1560 if (cc->dev)
1561 dm_put_device(ti, cc->dev);
1562
1563 kzfree(cc->cipher);
1564 kzfree(cc->cipher_string);
1565
	/* Must zero key material before freeing */
1567 kzfree(cc);
1568}
1569
1570static int crypt_ctr_cipher(struct dm_target *ti,
1571 char *cipher_in, char *key)
1572{
1573 struct crypt_config *cc = ti->private;
1574 char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
1575 char *cipher_api = NULL;
1576 int ret = -EINVAL;
1577 char dummy;
1578
1579
1580 if (strchr(cipher_in, '(')) {
1581 ti->error = "Bad cipher specification";
1582 return -EINVAL;
1583 }
1584
1585 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
1586 if (!cc->cipher_string)
1587 goto bad_mem;
1588
	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
1593 tmp = cipher_in;
1594 keycount = strsep(&tmp, "-");
1595 cipher = strsep(&keycount, ":");
1596
1597 if (!keycount)
1598 cc->tfms_count = 1;
1599 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
1600 !is_power_of_2(cc->tfms_count)) {
1601 ti->error = "Bad cipher key count specification";
1602 return -EINVAL;
1603 }
1604 cc->key_parts = cc->tfms_count;
1605 cc->key_extra_size = 0;
1606
1607 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1608 if (!cc->cipher)
1609 goto bad_mem;
1610
1611 chainmode = strsep(&tmp, "-");
1612 ivopts = strsep(&tmp, "-");
1613 ivmode = strsep(&ivopts, ":");
1614
1615 if (tmp)
1616 DMWARN("Ignoring unexpected additional cipher options");
1617
	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
1622 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
1623 chainmode = "cbc";
1624 ivmode = "plain";
1625 }
1626
1627 if (strcmp(chainmode, "ecb") && !ivmode) {
1628 ti->error = "IV mechanism required";
1629 return -EINVAL;
1630 }
1631
1632 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1633 if (!cipher_api)
1634 goto bad_mem;
1635
1636 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1637 "%s(%s)", chainmode, cipher);
1638 if (ret < 0) {
1639 kfree(cipher_api);
1640 goto bad_mem;
1641 }
1642
1643
1644 ret = crypt_alloc_tfms(cc, cipher_api);
1645 if (ret < 0) {
1646 ti->error = "Error allocating crypto tfm";
1647 goto bad;
1648 }
1649
1650
1651 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
1652 if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
1654 cc->iv_size = max(cc->iv_size,
1655 (unsigned int)(sizeof(u64) / sizeof(u8)));
1656 else if (ivmode) {
1657 DMWARN("Selected cipher does not support IVs");
1658 ivmode = NULL;
1659 }
1660
	/* Choose ivmode, see comments at iv code. */
1662 if (ivmode == NULL)
1663 cc->iv_gen_ops = NULL;
1664 else if (strcmp(ivmode, "plain") == 0)
1665 cc->iv_gen_ops = &crypt_iv_plain_ops;
1666 else if (strcmp(ivmode, "plain64") == 0)
1667 cc->iv_gen_ops = &crypt_iv_plain64_ops;
1668 else if (strcmp(ivmode, "essiv") == 0)
1669 cc->iv_gen_ops = &crypt_iv_essiv_ops;
1670 else if (strcmp(ivmode, "benbi") == 0)
1671 cc->iv_gen_ops = &crypt_iv_benbi_ops;
1672 else if (strcmp(ivmode, "null") == 0)
1673 cc->iv_gen_ops = &crypt_iv_null_ops;
1674 else if (strcmp(ivmode, "lmk") == 0) {
1675 cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
1682 if (cc->key_size % cc->key_parts) {
1683 cc->key_parts++;
1684 cc->key_extra_size = cc->key_size / cc->key_parts;
1685 }
1686 } else if (strcmp(ivmode, "tcw") == 0) {
1687 cc->iv_gen_ops = &crypt_iv_tcw_ops;
1688 cc->key_parts += 2;
1689 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
1690 } else {
1691 ret = -EINVAL;
1692 ti->error = "Invalid IV mode";
1693 goto bad;
1694 }
1695
1696
1697 ret = crypt_set_key(cc, key);
1698 if (ret < 0) {
1699 ti->error = "Error decoding and setting key";
1700 goto bad;
1701 }
1702
1703
1704 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
1705 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
1706 if (ret < 0) {
1707 ti->error = "Error creating IV";
1708 goto bad;
1709 }
1710 }
1711
	/* Initialize IV (set keys for ESSIV etc) */
1713 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
1714 ret = cc->iv_gen_ops->init(cc);
1715 if (ret < 0) {
1716 ti->error = "Error initialising IV";
1717 goto bad;
1718 }
1719 }
1720
1721 ret = 0;
1722bad:
1723 kfree(cipher_api);
1724 return ret;
1725
1726bad_mem:
1727 ti->error = "Cannot allocate cipher strings";
1728 return -ENOMEM;
1729}
1730
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
1735static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1736{
1737 struct crypt_config *cc;
1738 unsigned int key_size, opt_params;
1739 unsigned long long tmpll;
1740 int ret;
1741 size_t iv_size_padding;
1742 struct dm_arg_set as;
1743 const char *opt_string;
1744 char dummy;
1745
1746 static struct dm_arg _args[] = {
1747 {0, 3, "Invalid number of feature args"},
1748 };
1749
1750 if (argc < 5) {
1751 ti->error = "Not enough arguments";
1752 return -EINVAL;
1753 }
1754
1755 key_size = strlen(argv[1]) >> 1;
1756
1757 cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
1758 if (!cc) {
1759 ti->error = "Cannot allocate encryption context";
1760 return -ENOMEM;
1761 }
1762 cc->key_size = key_size;
1763
1764 ti->private = cc;
1765 ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
1766 if (ret < 0)
1767 goto bad;
1768
1769 cc->dmreq_start = sizeof(struct skcipher_request);
1770 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
1771 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
1772
1773 if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
1775 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
1776 & crypto_skcipher_alignmask(any_tfm(cc));
1777 } else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
1783 iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
1784 }
1785
1786 ret = -ENOMEM;
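	/*
	 * Each element of the request mempool holds the skcipher request and
	 * its tfm context, followed by struct dm_crypt_request, padding and
	 * the IV.
	 */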
1787 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
1788 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
1789 if (!cc->req_pool) {
1790 ti->error = "Cannot allocate crypt request mempool";
1791 goto bad;
1792 }
1793
1794 cc->per_bio_data_size = ti->per_io_data_size =
1795 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
1796 sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
1797 ARCH_KMALLOC_MINALIGN);
1798
1799 cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
1800 if (!cc->page_pool) {
1801 ti->error = "Cannot allocate page mempool";
1802 goto bad;
1803 }
1804
1805 cc->bs = bioset_create(MIN_IOS, 0);
1806 if (!cc->bs) {
1807 ti->error = "Cannot allocate crypt bioset";
1808 goto bad;
1809 }
1810
1811 mutex_init(&cc->bio_alloc_lock);
1812
1813 ret = -EINVAL;
1814 if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
1815 ti->error = "Invalid iv_offset sector";
1816 goto bad;
1817 }
1818 cc->iv_offset = tmpll;
1819
1820 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
1821 if (ret) {
1822 ti->error = "Device lookup failed";
1823 goto bad;
1824 }
1825
1826 ret = -EINVAL;
1827 if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
1828 ti->error = "Invalid device sector";
1829 goto bad;
1830 }
1831 cc->start = tmpll;
1832
1833 argv += 5;
1834 argc -= 5;
1835
	/* Optional parameters */
1837 if (argc) {
1838 as.argc = argc;
1839 as.argv = argv;
1840
1841 ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
1842 if (ret)
1843 goto bad;
1844
1845 ret = -EINVAL;
1846 while (opt_params--) {
1847 opt_string = dm_shift_arg(&as);
1848 if (!opt_string) {
1849 ti->error = "Not enough feature arguments";
1850 goto bad;
1851 }
1852
1853 if (!strcasecmp(opt_string, "allow_discards"))
1854 ti->num_discard_bios = 1;
1855
1856 else if (!strcasecmp(opt_string, "same_cpu_crypt"))
1857 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1858
1859 else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
1860 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1861
1862 else {
1863 ti->error = "Invalid feature arguments";
1864 goto bad;
1865 }
1866 }
1867 }
1868
1869 ret = -ENOMEM;
1870 cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
1871 if (!cc->io_queue) {
1872 ti->error = "Couldn't create kcryptd io queue";
1873 goto bad;
1874 }
1875
1876 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1877 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
1878 else
1879 cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
1880 num_online_cpus());
1881 if (!cc->crypt_queue) {
1882 ti->error = "Couldn't create kcryptd queue";
1883 goto bad;
1884 }
1885
1886 init_waitqueue_head(&cc->write_thread_wait);
1887 cc->write_tree = RB_ROOT;
1888
1889 cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
1890 if (IS_ERR(cc->write_thread)) {
1891 ret = PTR_ERR(cc->write_thread);
1892 cc->write_thread = NULL;
1893 ti->error = "Couldn't spawn write thread";
1894 goto bad;
1895 }
1896 wake_up_process(cc->write_thread);
1897
1898 ti->num_flush_bios = 1;
1899 ti->discard_zeroes_data_unsupported = true;
1900
1901 return 0;
1902
1903bad:
1904 crypt_dtr(ti);
1905 return ret;
1906}
1907
1908static int crypt_map(struct dm_target *ti, struct bio *bio)
1909{
1910 struct dm_crypt_io *io;
1911 struct crypt_config *cc = ti->private;
1912
	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO ordering matters
	 */
1918 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1919 bio->bi_bdev = cc->dev->bdev;
1920 if (bio_sectors(bio))
1921 bio->bi_iter.bi_sector = cc->start +
1922 dm_target_offset(ti, bio->bi_iter.bi_sector);
1923 return DM_MAPIO_REMAPPED;
1924 }
1925
1926 io = dm_per_bio_data(bio, cc->per_bio_data_size);
1927 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
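	/* The skcipher request lives in the per-bio data, right after dm_crypt_io. */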
1928 io->ctx.req = (struct skcipher_request *)(io + 1);
1929
1930 if (bio_data_dir(io->base_bio) == READ) {
1931 if (kcryptd_io_read(io, GFP_NOWAIT))
1932 kcryptd_queue_read(io);
1933 } else
1934 kcryptd_queue_crypt(io);
1935
1936 return DM_MAPIO_SUBMITTED;
1937}
1938
1939static void crypt_status(struct dm_target *ti, status_type_t type,
1940 unsigned status_flags, char *result, unsigned maxlen)
1941{
1942 struct crypt_config *cc = ti->private;
1943 unsigned i, sz = 0;
1944 int num_feature_args = 0;
1945
1946 switch (type) {
1947 case STATUSTYPE_INFO:
1948 result[0] = '\0';
1949 break;
1950
1951 case STATUSTYPE_TABLE:
1952 DMEMIT("%s ", cc->cipher_string);
1953
1954 if (cc->key_size > 0)
1955 for (i = 0; i < cc->key_size; i++)
1956 DMEMIT("%02x", cc->key[i]);
1957 else
1958 DMEMIT("-");
1959
1960 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
1961 cc->dev->name, (unsigned long long)cc->start);
1962
1963 num_feature_args += !!ti->num_discard_bios;
1964 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
1965 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
1966 if (num_feature_args) {
1967 DMEMIT(" %d", num_feature_args);
1968 if (ti->num_discard_bios)
1969 DMEMIT(" allow_discards");
1970 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
1971 DMEMIT(" same_cpu_crypt");
1972 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
1973 DMEMIT(" submit_from_crypt_cpus");
1974 }
1975
1976 break;
1977 }
1978}
1979
1980static void crypt_postsuspend(struct dm_target *ti)
1981{
1982 struct crypt_config *cc = ti->private;
1983
1984 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
1985}
1986
1987static int crypt_preresume(struct dm_target *ti)
1988{
1989 struct crypt_config *cc = ti->private;
1990
1991 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
1992 DMERR("aborting resume - crypt key is not set.");
1993 return -EAGAIN;
1994 }
1995
1996 return 0;
1997}
1998
1999static void crypt_resume(struct dm_target *ti)
2000{
2001 struct crypt_config *cc = ti->private;
2002
2003 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
2004}
2005
/* Message interface
 *	key set <key>
 *	key wipe
 */
2010static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2011{
2012 struct crypt_config *cc = ti->private;
2013 int ret = -EINVAL;
2014
2015 if (argc < 2)
2016 goto error;
2017
2018 if (!strcasecmp(argv[0], "key")) {
2019 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
2020 DMWARN("not suspended during key manipulation.");
2021 return -EINVAL;
2022 }
2023 if (argc == 3 && !strcasecmp(argv[1], "set")) {
2024 ret = crypt_set_key(cc, argv[2]);
2025 if (ret)
2026 return ret;
2027 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2028 ret = cc->iv_gen_ops->init(cc);
2029 return ret;
2030 }
2031 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
2032 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2033 ret = cc->iv_gen_ops->wipe(cc);
2034 if (ret)
2035 return ret;
2036 }
2037 return crypt_wipe_key(cc);
2038 }
2039 }
2040
2041error:
2042 DMWARN("unrecognised message received.");
2043 return -EINVAL;
2044}
2045
2046static int crypt_iterate_devices(struct dm_target *ti,
2047 iterate_devices_callout_fn fn, void *data)
2048{
2049 struct crypt_config *cc = ti->private;
2050
2051 return fn(ti, cc->dev, cc->start, ti->len, data);
2052}
2053
2054static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2055{
	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
2062 limits->max_segment_size = PAGE_SIZE;
2063}
2064
2065static struct target_type crypt_target = {
2066 .name = "crypt",
2067 .version = {1, 14, 1},
2068 .module = THIS_MODULE,
2069 .ctr = crypt_ctr,
2070 .dtr = crypt_dtr,
2071 .map = crypt_map,
2072 .status = crypt_status,
2073 .postsuspend = crypt_postsuspend,
2074 .preresume = crypt_preresume,
2075 .resume = crypt_resume,
2076 .message = crypt_message,
2077 .iterate_devices = crypt_iterate_devices,
2078 .io_hints = crypt_io_hints,
2079};
2080
2081static int __init dm_crypt_init(void)
2082{
2083 int r;
2084
2085 r = dm_register_target(&crypt_target);
2086 if (r < 0)
2087 DMERR("register failed %d", r);
2088
2089 return r;
2090}
2091
2092static void __exit dm_crypt_exit(void)
2093{
2094 dm_unregister_target(&crypt_target);
2095}
2096
2097module_init(dm_crypt_init);
2098module_exit(dm_crypt_exit);
2099
2100MODULE_AUTHOR("Jana Saout <jana@saout.de>");
2101MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2102MODULE_LICENSE("GPL");
2103