#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <keys/user-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
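
/*
 * Example mapping table line for this target (illustrative only; see
 * Documentation/device-mapper/dm-crypt.txt for the authoritative syntax):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0
 *
 * i.e. <cipher> <key> <iv_offset> <device path> <offset> [<#opt_params> <opt_params>]
 */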

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	sector_t cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;

};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_ahash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* use authenticated (AEAD) cipher mode */
	CRYPT_IV_LARGE_SECTORS,		/* calculate the IV from sector_size, not 512B sectors */
};

/*
 * The fields in here must be read only after initialization.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */
	mempool_t *req_pool;
	mempool_t *page_pool;
	mempool_t *tag_pool;
	unsigned tag_pool_max_sectors;

	struct bio_set *bs;
	struct mutex bio_alloc_lock;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	struct task_struct *write_thread;
	wait_queue_head_t write_thread_wait;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;      /* independent parts in key buffer */
	unsigned int key_extra_size; /* additional keys length */
	unsigned int key_mac_size;   /* MAC key size for authenc(...) */

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

	u8 *authenc_key; /* space for keys in authenc() format (if used) */
	u8 key[0];
};

#define MIN_IOS		64
#define MAX_TAG_SIZE	480
#define POOL_ENTRY_SIZE	512

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

/*
 * Use this to access cipher attributes that are independent of the key.
 */
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used by the
 *      Loop-AES block device encryption system. It operates on full 512 byte
 *      sectors and uses CBC with an IV derived from the sector number, the
 *      data and, optionally, an extra IV seed.
 *
 * tcw: Compatible implementation of the block chaining mode used by the
 *      TrueCrypt device encryption system (prior to version 4.1). It operates
 *      on full 512 byte sectors and uses CBC with an IV derived from the
 *      initial key and the sector number. In addition, a whitening value is
 *      applied on every sector.
 *
 * random: the initial vector is generated randomly for each request and is
 *      stored in the per-sector integrity metadata (integrity/AEAD mode only).
 */
287static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
288 struct dm_crypt_request *dmreq)
289{
290 memset(iv, 0, cc->iv_size);
291 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
292
293 return 0;
294}
295
296static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
297 struct dm_crypt_request *dmreq)
298{
299 memset(iv, 0, cc->iv_size);
300 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
301
302 return 0;
303}

/* Initialise ESSIV - compute salt but no local memory allocations */
306static int crypt_iv_essiv_init(struct crypt_config *cc)
307{
308 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
309 AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
310 struct scatterlist sg;
311 struct crypto_cipher *essiv_tfm;
312 int err;
313
314 sg_init_one(&sg, cc->key, cc->key_size);
315 ahash_request_set_tfm(req, essiv->hash_tfm);
316 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
317 ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
318
319 err = crypto_ahash_digest(req);
320 ahash_request_zero(req);
321 if (err)
322 return err;
323
324 essiv_tfm = cc->iv_private;
325
326 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
327 crypto_ahash_digestsize(essiv->hash_tfm));
328 if (err)
329 return err;
330
331 return 0;
332}

/* Wipe salt and reset key derived from volume key */
335static int crypt_iv_essiv_wipe(struct crypt_config *cc)
336{
337 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
338 unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
339 struct crypto_cipher *essiv_tfm;
340 int r, err = 0;
341
342 memset(essiv->salt, 0, salt_size);
343
344 essiv_tfm = cc->iv_private;
345 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
346 if (r)
347 err = r;
348
349 return err;
350}
351
352
353static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
354 struct dm_target *ti,
355 const u8 *salt,
356 unsigned int saltsize)
357{
358 struct crypto_cipher *essiv_tfm;
359 int err;
360
361
362 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
363 if (IS_ERR(essiv_tfm)) {
364 ti->error = "Error allocating crypto tfm for ESSIV";
365 return essiv_tfm;
366 }
367
368 if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
369 ti->error = "Block size of ESSIV cipher does "
370 "not match IV size of block cipher";
371 crypto_free_cipher(essiv_tfm);
372 return ERR_PTR(-EINVAL);
373 }
374
375 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
376 if (err) {
377 ti->error = "Failed to set key for ESSIV cipher";
378 crypto_free_cipher(essiv_tfm);
379 return ERR_PTR(err);
380 }
381
382 return essiv_tfm;
383}
384
385static void crypt_iv_essiv_dtr(struct crypt_config *cc)
386{
387 struct crypto_cipher *essiv_tfm;
388 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
389
390 crypto_free_ahash(essiv->hash_tfm);
391 essiv->hash_tfm = NULL;
392
393 kzfree(essiv->salt);
394 essiv->salt = NULL;
395
396 essiv_tfm = cc->iv_private;
397
398 if (essiv_tfm)
399 crypto_free_cipher(essiv_tfm);
400
401 cc->iv_private = NULL;
402}
403
404static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
405 const char *opts)
406{
407 struct crypto_cipher *essiv_tfm = NULL;
408 struct crypto_ahash *hash_tfm = NULL;
409 u8 *salt = NULL;
410 int err;
411
412 if (!opts) {
413 ti->error = "Digest algorithm missing for ESSIV mode";
414 return -EINVAL;
415 }
416
417
418 hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
419 if (IS_ERR(hash_tfm)) {
420 ti->error = "Error initializing ESSIV hash";
421 err = PTR_ERR(hash_tfm);
422 goto bad;
423 }
424
425 salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
426 if (!salt) {
427 ti->error = "Error kmallocing salt storage in ESSIV";
428 err = -ENOMEM;
429 goto bad;
430 }
431
432 cc->iv_gen_private.essiv.salt = salt;
433 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
434
435 essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
436 crypto_ahash_digestsize(hash_tfm));
437 if (IS_ERR(essiv_tfm)) {
438 crypt_iv_essiv_dtr(cc);
439 return PTR_ERR(essiv_tfm);
440 }
441 cc->iv_private = essiv_tfm;
442
443 return 0;
444
445bad:
446 if (hash_tfm && !IS_ERR(hash_tfm))
447 crypto_free_ahash(hash_tfm);
448 kfree(salt);
449 return err;
450}
451
452static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
453 struct dm_crypt_request *dmreq)
454{
455 struct crypto_cipher *essiv_tfm = cc->iv_private;
456
457 memset(iv, 0, cc->iv_size);
458 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
459 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
460
461 return 0;
462}
463
464static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
465 const char *opts)
466{
467 unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
468 int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

473 if (1 << log != bs) {
474 ti->error = "cypher blocksize is not a power of 2";
475 return -EINVAL;
476 }
477
478 if (log > 9) {
479 ti->error = "cypher blocksize is > 512";
480 return -EINVAL;
481 }
482
483 cc->iv_gen_private.benbi.shift = 9 - log;
484
485 return 0;
486}
487
488static void crypt_iv_benbi_dtr(struct crypt_config *cc)
489{
490}
491
492static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
493 struct dm_crypt_request *dmreq)
494{
495 __be64 val;
496
497 memset(iv, 0, cc->iv_size - sizeof(u64));
498
499 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
500 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
501
502 return 0;
503}
504
505static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
506 struct dm_crypt_request *dmreq)
507{
508 memset(iv, 0, cc->iv_size);
509
510 return 0;
511}
512
513static void crypt_iv_lmk_dtr(struct crypt_config *cc)
514{
515 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
516
517 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
518 crypto_free_shash(lmk->hash_tfm);
519 lmk->hash_tfm = NULL;
520
521 kzfree(lmk->seed);
522 lmk->seed = NULL;
523}
524
525static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
526 const char *opts)
527{
528 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
529
530 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
531 ti->error = "Unsupported sector size for LMK";
532 return -EINVAL;
533 }
534
535 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
536 if (IS_ERR(lmk->hash_tfm)) {
537 ti->error = "Error initializing LMK hash";
538 return PTR_ERR(lmk->hash_tfm);
539 }

	/* No seed in LMK version 2 */
542 if (cc->key_parts == cc->tfms_count) {
543 lmk->seed = NULL;
544 return 0;
545 }
546
547 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
548 if (!lmk->seed) {
549 crypt_iv_lmk_dtr(cc);
550 ti->error = "Error kmallocing seed storage in LMK";
551 return -ENOMEM;
552 }
553
554 return 0;
555}
556
557static int crypt_iv_lmk_init(struct crypt_config *cc)
558{
559 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
560 int subkey_size = cc->key_size / cc->key_parts;
561
562
563 if (lmk->seed)
564 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
565 crypto_shash_digestsize(lmk->hash_tfm));
566
567 return 0;
568}
569
570static int crypt_iv_lmk_wipe(struct crypt_config *cc)
571{
572 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
573
574 if (lmk->seed)
575 memset(lmk->seed, 0, LMK_SEED_SIZE);
576
577 return 0;
578}
579
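/*
 * Compute one LMK IV: an MD5 pass over the optional seed, bytes 16..511 of
 * the sector data and a sector-number trailer; the exported state words are
 * converted to little-endian and used as the IV.
 */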
580static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
581 struct dm_crypt_request *dmreq,
582 u8 *data)
583{
584 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
585 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
586 struct md5_state md5state;
587 __le32 buf[4];
588 int i, r;
589
590 desc->tfm = lmk->hash_tfm;
591 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
592
593 r = crypto_shash_init(desc);
594 if (r)
595 return r;
596
597 if (lmk->seed) {
598 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
599 if (r)
600 return r;
601 }
602
603
604 r = crypto_shash_update(desc, data + 16, 16 * 31);
605 if (r)
606 return r;

	/* Sector is cropped to 56 bits here */
609 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
610 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
611 buf[2] = cpu_to_le32(4024);
612 buf[3] = 0;
613 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
614 if (r)
615 return r;

	/* No MD5 padding here */
618 r = crypto_shash_export(desc, &md5state);
619 if (r)
620 return r;
621
622 for (i = 0; i < MD5_HASH_WORDS; i++)
623 __cpu_to_le32s(&md5state.hash[i]);
624 memcpy(iv, &md5state.hash, cc->iv_size);
625
626 return 0;
627}
628
629static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
630 struct dm_crypt_request *dmreq)
631{
632 struct scatterlist *sg;
633 u8 *src;
634 int r = 0;
635
636 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
637 sg = crypt_get_sg_data(cc, dmreq->sg_in);
638 src = kmap_atomic(sg_page(sg));
639 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
640 kunmap_atomic(src);
641 } else
642 memset(iv, 0, cc->iv_size);
643
644 return r;
645}
646
647static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
648 struct dm_crypt_request *dmreq)
649{
650 struct scatterlist *sg;
651 u8 *dst;
652 int r;
653
654 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
655 return 0;
656
657 sg = crypt_get_sg_data(cc, dmreq->sg_out);
658 dst = kmap_atomic(sg_page(sg));
659 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	/* Tweak the first block of plaintext sector */
662 if (!r)
663 crypto_xor(dst + sg->offset, iv, cc->iv_size);
664
665 kunmap_atomic(dst);
666 return r;
667}
668
669static void crypt_iv_tcw_dtr(struct crypt_config *cc)
670{
671 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
672
673 kzfree(tcw->iv_seed);
674 tcw->iv_seed = NULL;
675 kzfree(tcw->whitening);
676 tcw->whitening = NULL;
677
678 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
679 crypto_free_shash(tcw->crc32_tfm);
680 tcw->crc32_tfm = NULL;
681}
682
683static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
684 const char *opts)
685{
686 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
687
688 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
689 ti->error = "Unsupported sector size for TCW";
690 return -EINVAL;
691 }
692
693 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
694 ti->error = "Wrong key size for TCW";
695 return -EINVAL;
696 }
697
698 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
699 if (IS_ERR(tcw->crc32_tfm)) {
700 ti->error = "Error initializing CRC32 in TCW";
701 return PTR_ERR(tcw->crc32_tfm);
702 }
703
704 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
705 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
706 if (!tcw->iv_seed || !tcw->whitening) {
707 crypt_iv_tcw_dtr(cc);
708 ti->error = "Error allocating seed storage in TCW";
709 return -ENOMEM;
710 }
711
712 return 0;
713}
714
715static int crypt_iv_tcw_init(struct crypt_config *cc)
716{
717 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
718 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
719
720 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
721 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
722 TCW_WHITENING_SIZE);
723
724 return 0;
725}
726
727static int crypt_iv_tcw_wipe(struct crypt_config *cc)
728{
729 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
730
731 memset(tcw->iv_seed, 0, cc->iv_size);
732 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
733
734 return 0;
735}
736
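/*
 * Apply TCW whitening to one 512B sector: derive a per-sector 16-byte value
 * from the whitening seed and sector number, mix it with CRC32, then xor the
 * resulting 8 bytes over the whole sector buffer.
 */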
737static int crypt_iv_tcw_whitening(struct crypt_config *cc,
738 struct dm_crypt_request *dmreq,
739 u8 *data)
740{
741 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
742 __le64 sector = cpu_to_le64(dmreq->iv_sector);
743 u8 buf[TCW_WHITENING_SIZE];
744 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
745 int i, r;

	/* xor whitening with sector number */
	memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
	crypto_xor(buf, (u8 *)&sector, 8);
	crypto_xor(&buf[8], (u8 *)&sector, 8);

	/* calculate crc32 for every 32bit part and xor it */
753 desc->tfm = tcw->crc32_tfm;
754 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
755 for (i = 0; i < 4; i++) {
756 r = crypto_shash_init(desc);
757 if (r)
758 goto out;
759 r = crypto_shash_update(desc, &buf[i * 4], 4);
760 if (r)
761 goto out;
762 r = crypto_shash_final(desc, &buf[i * 4]);
763 if (r)
764 goto out;
765 }
766 crypto_xor(&buf[0], &buf[12], 4);
767 crypto_xor(&buf[4], &buf[8], 4);

	/* apply whitening (8 bytes) to whole sector */
770 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
771 crypto_xor(data + i * 8, buf, 8);
772out:
773 memzero_explicit(buf, sizeof(buf));
774 return r;
775}
776
777static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
778 struct dm_crypt_request *dmreq)
779{
780 struct scatterlist *sg;
781 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
782 __le64 sector = cpu_to_le64(dmreq->iv_sector);
783 u8 *src;
784 int r = 0;

	/* Remove whitening from ciphertext */
787 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
788 sg = crypt_get_sg_data(cc, dmreq->sg_in);
789 src = kmap_atomic(sg_page(sg));
790 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
791 kunmap_atomic(src);
792 }

	/* Calculate IV */
	memcpy(iv, tcw->iv_seed, cc->iv_size);
	crypto_xor(iv, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
799
800 return r;
801}
802
803static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
804 struct dm_crypt_request *dmreq)
805{
806 struct scatterlist *sg;
807 u8 *dst;
808 int r;
809
810 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
811 return 0;

	/* Apply whitening on ciphertext */
814 sg = crypt_get_sg_data(cc, dmreq->sg_out);
815 dst = kmap_atomic(sg_page(sg));
816 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
817 kunmap_atomic(dst);
818
819 return r;
820}
821
822static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
823 struct dm_crypt_request *dmreq)
824{
825
826 get_random_bytes(iv, cc->iv_size);
827 return 0;
828}
829
830static const struct crypt_iv_operations crypt_iv_plain_ops = {
831 .generator = crypt_iv_plain_gen
832};
833
834static const struct crypt_iv_operations crypt_iv_plain64_ops = {
835 .generator = crypt_iv_plain64_gen
836};
837
838static const struct crypt_iv_operations crypt_iv_essiv_ops = {
839 .ctr = crypt_iv_essiv_ctr,
840 .dtr = crypt_iv_essiv_dtr,
841 .init = crypt_iv_essiv_init,
842 .wipe = crypt_iv_essiv_wipe,
843 .generator = crypt_iv_essiv_gen
844};
845
846static const struct crypt_iv_operations crypt_iv_benbi_ops = {
847 .ctr = crypt_iv_benbi_ctr,
848 .dtr = crypt_iv_benbi_dtr,
849 .generator = crypt_iv_benbi_gen
850};
851
852static const struct crypt_iv_operations crypt_iv_null_ops = {
853 .generator = crypt_iv_null_gen
854};
855
856static const struct crypt_iv_operations crypt_iv_lmk_ops = {
857 .ctr = crypt_iv_lmk_ctr,
858 .dtr = crypt_iv_lmk_dtr,
859 .init = crypt_iv_lmk_init,
860 .wipe = crypt_iv_lmk_wipe,
861 .generator = crypt_iv_lmk_gen,
862 .post = crypt_iv_lmk_post
863};
864
865static const struct crypt_iv_operations crypt_iv_tcw_ops = {
866 .ctr = crypt_iv_tcw_ctr,
867 .dtr = crypt_iv_tcw_dtr,
868 .init = crypt_iv_tcw_init,
869 .wipe = crypt_iv_tcw_wipe,
870 .generator = crypt_iv_tcw_gen,
871 .post = crypt_iv_tcw_post
872};
873
874static struct crypt_iv_operations crypt_iv_random_ops = {
875 .generator = crypt_iv_random_gen
876};

/*
 * Integrity extensions
 */
881static bool crypt_integrity_aead(struct crypt_config *cc)
882{
883 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
884}
885
886static bool crypt_integrity_hmac(struct crypt_config *cc)
887{
888 return crypt_integrity_aead(cc) && cc->key_mac_size;
889}

/* Get sg containing data */
892static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
893 struct scatterlist *sg)
894{
895 if (unlikely(crypt_integrity_aead(cc)))
896 return &sg[2];
897
898 return sg;
899}
900
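/*
 * Attach a bio_integrity payload that points at io->integrity_metadata so the
 * on-disk tags travel with the bio; a no-op when the bio carries no data or
 * no tag space is configured.
 */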
901static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
902{
903 struct bio_integrity_payload *bip;
904 unsigned int tag_len;
905 int ret;
906
907 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
908 return 0;
909
910 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
911 if (IS_ERR(bip))
912 return PTR_ERR(bip);
913
914 tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
915
916 bip->bip_iter.bi_size = tag_len;
917 bip->bip_iter.bi_sector = io->cc->start + io->sector;
918
919
920 bip->bip_flags &= ~BIP_BLOCK_INTEGRITY;
921
922 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
923 tag_len, offset_in_page(io->integrity_metadata));
924 if (unlikely(ret != tag_len))
925 return -ENOMEM;
926
927 return 0;
928}
929
930static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
931{
932#ifdef CONFIG_BLK_DEV_INTEGRITY
933 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
934
935
936 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
937 ti->error = "Integrity profile not supported.";
938 return -EINVAL;
939 }
940
941 if (bi->tag_size != cc->on_disk_tag_size ||
942 bi->tuple_size != cc->on_disk_tag_size) {
943 ti->error = "Integrity profile tag size mismatch.";
944 return -EINVAL;
945 }
946 if (1 << bi->interval_exp != cc->sector_size) {
947 ti->error = "Integrity profile sector size mismatch.";
948 return -EINVAL;
949 }
950
951 if (crypt_integrity_aead(cc)) {
952 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
953 DMINFO("Integrity AEAD, tag size %u, IV size %u.",
954 cc->integrity_tag_size, cc->integrity_iv_size);
955
956 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
957 ti->error = "Integrity AEAD auth tag size is not supported.";
958 return -EINVAL;
959 }
960 } else if (cc->integrity_iv_size)
961 DMINFO("Additional per-sector space %u bytes for IV.",
962 cc->integrity_iv_size);
963
964 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
965 ti->error = "Not enough space for integrity tag in the profile.";
966 return -EINVAL;
967 }
968
969 return 0;
970#else
971 ti->error = "Integrity profile not supported.";
972 return -EINVAL;
973#endif
974}
975
976static void crypt_convert_init(struct crypt_config *cc,
977 struct convert_context *ctx,
978 struct bio *bio_out, struct bio *bio_in,
979 sector_t sector)
980{
981 ctx->bio_in = bio_in;
982 ctx->bio_out = bio_out;
983 if (bio_in)
984 ctx->iter_in = bio_in->bi_iter;
985 if (bio_out)
986 ctx->iter_out = bio_out->bi_iter;
987 ctx->cc_sector = sector + cc->iv_offset;
988 init_completion(&ctx->restart);
989}
990
991static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
992 void *req)
993{
994 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
995}
996
997static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
998{
999 return (void *)((char *)dmreq - cc->dmreq_start);
1000}
1001
1002static u8 *iv_of_dmreq(struct crypt_config *cc,
1003 struct dm_crypt_request *dmreq)
1004{
1005 if (crypt_integrity_aead(cc))
1006 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1007 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1008 else
1009 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1010 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1011}
1012
1013static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1014 struct dm_crypt_request *dmreq)
1015{
1016 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1017}
1018
1019static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
1020 struct dm_crypt_request *dmreq)
1021{
1022 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1023 return (uint64_t*) ptr;
1024}
1025
1026static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1027 struct dm_crypt_request *dmreq)
1028{
1029 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1030 cc->iv_size + sizeof(uint64_t);
1031 return (unsigned int*)ptr;
1032}
1033
1034static void *tag_from_dmreq(struct crypt_config *cc,
1035 struct dm_crypt_request *dmreq)
1036{
1037 struct convert_context *ctx = dmreq->ctx;
1038 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1039
1040 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1041 cc->on_disk_tag_size];
1042}
1043
1044static void *iv_tag_from_dmreq(struct crypt_config *cc,
1045 struct dm_crypt_request *dmreq)
1046{
1047 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1048}
1049
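/*
 * Encrypt/decrypt one sector with the AEAD cipher: the little-endian sector
 * number plus the IV form the AAD, data is read from bio_in and written to
 * bio_out, and the authentication tag is kept in the per-bio integrity
 * metadata (tag_from_dmreq()).
 */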
1050static int crypt_convert_block_aead(struct crypt_config *cc,
1051 struct convert_context *ctx,
1052 struct aead_request *req,
1053 unsigned int tag_offset)
1054{
1055 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1056 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1057 struct dm_crypt_request *dmreq;
1058 u8 *iv, *org_iv, *tag_iv, *tag;
1059 uint64_t *sector;
1060 int r = 0;
1061
1062 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	/* Reject unexpected unaligned bio. */
1065 if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
1066 return -EIO;
1067
1068 dmreq = dmreq_of_req(cc, req);
1069 dmreq->iv_sector = ctx->cc_sector;
1070 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1071 dmreq->iv_sector >>= cc->sector_shift;
1072 dmreq->ctx = ctx;
1073
1074 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1075
1076 sector = org_sector_of_dmreq(cc, dmreq);
1077 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1078
1079 iv = iv_of_dmreq(cc, dmreq);
1080 org_iv = org_iv_of_dmreq(cc, dmreq);
1081 tag = tag_from_dmreq(cc, dmreq);
1082 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1083
	/* AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */
1089 sg_init_table(dmreq->sg_in, 4);
1090 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1091 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1092 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1093 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1094
1095 sg_init_table(dmreq->sg_out, 4);
1096 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1097 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1098 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1099 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1100
1101 if (cc->iv_gen_ops) {
		/* For READs use IV stored in integrity metadata */
1103 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1104 memcpy(org_iv, tag_iv, cc->iv_size);
1105 } else {
1106 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1107 if (r < 0)
1108 return r;
1109
1110 if (cc->integrity_iv_size)
1111 memcpy(tag_iv, org_iv, cc->iv_size);
1112 }
1113
1114 memcpy(iv, org_iv, cc->iv_size);
1115 }
1116
1117 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1118 if (bio_data_dir(ctx->bio_in) == WRITE) {
1119 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1120 cc->sector_size, iv);
1121 r = crypto_aead_encrypt(req);
1122 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1123 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1124 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1125 } else {
1126 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1127 cc->sector_size + cc->integrity_tag_size, iv);
1128 r = crypto_aead_decrypt(req);
1129 }
1130
1131 if (r == -EBADMSG)
1132 DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1133 (unsigned long long)le64_to_cpu(*sector));
1134
1135 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1136 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1137
1138 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1139 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1140
1141 return r;
1142}
1143
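/*
 * Encrypt/decrypt one sector with a plain skcipher (no AEAD): single-entry
 * scatterlists for input and output, IV supplied by the configured
 * iv_gen_ops and optionally mirrored into the integrity metadata.
 */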
1144static int crypt_convert_block_skcipher(struct crypt_config *cc,
1145 struct convert_context *ctx,
1146 struct skcipher_request *req,
1147 unsigned int tag_offset)
1148{
1149 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1150 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1151 struct scatterlist *sg_in, *sg_out;
1152 struct dm_crypt_request *dmreq;
1153 u8 *iv, *org_iv, *tag_iv;
1154 uint64_t *sector;
1155 int r = 0;
1156
1157
1158 if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
1159 return -EIO;
1160
1161 dmreq = dmreq_of_req(cc, req);
1162 dmreq->iv_sector = ctx->cc_sector;
1163 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1164 dmreq->iv_sector >>= cc->sector_shift;
1165 dmreq->ctx = ctx;
1166
1167 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1168
1169 iv = iv_of_dmreq(cc, dmreq);
1170 org_iv = org_iv_of_dmreq(cc, dmreq);
1171 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1172
1173 sector = org_sector_of_dmreq(cc, dmreq);
1174 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1175
1176
1177 sg_in = &dmreq->sg_in[0];
1178 sg_out = &dmreq->sg_out[0];
1179
1180 sg_init_table(sg_in, 1);
1181 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1182
1183 sg_init_table(sg_out, 1);
1184 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1185
1186 if (cc->iv_gen_ops) {
1187
1188 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1189 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1190 } else {
1191 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1192 if (r < 0)
1193 return r;
1194
1195 if (cc->integrity_iv_size)
1196 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1197 }
1198
1199 memcpy(iv, org_iv, cc->iv_size);
1200 }
1201
1202 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1203
1204 if (bio_data_dir(ctx->bio_in) == WRITE)
1205 r = crypto_skcipher_encrypt(req);
1206 else
1207 r = crypto_skcipher_decrypt(req);
1208
1209 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1210 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1211
1212 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1213 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1214
1215 return r;
1216}
1217
1218static void kcryptd_async_done(struct crypto_async_request *async_req,
1219 int error);
1220
1221static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1222 struct convert_context *ctx)
1223{
1224 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1225
1226 if (!ctx->r.req)
1227 ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
1228
1229 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
1235 skcipher_request_set_callback(ctx->r.req,
1236 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
1237 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1238}
1239
1240static void crypt_alloc_req_aead(struct crypt_config *cc,
1241 struct convert_context *ctx)
1242{
1243 if (!ctx->r.req_aead)
1244 ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
1245
1246 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */
1252 aead_request_set_callback(ctx->r.req_aead,
1253 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
1254 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1255}
1256
1257static void crypt_alloc_req(struct crypt_config *cc,
1258 struct convert_context *ctx)
1259{
1260 if (crypt_integrity_aead(cc))
1261 crypt_alloc_req_aead(cc, ctx);
1262 else
1263 crypt_alloc_req_skcipher(cc, ctx);
1264}
1265
1266static void crypt_free_req_skcipher(struct crypt_config *cc,
1267 struct skcipher_request *req, struct bio *base_bio)
1268{
1269 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1270
1271 if ((struct skcipher_request *)(io + 1) != req)
1272 mempool_free(req, cc->req_pool);
1273}
1274
1275static void crypt_free_req_aead(struct crypt_config *cc,
1276 struct aead_request *req, struct bio *base_bio)
1277{
1278 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1279
1280 if ((struct aead_request *)(io + 1) != req)
1281 mempool_free(req, cc->req_pool);
1282}
1283
1284static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1285{
1286 if (crypt_integrity_aead(cc))
1287 crypt_free_req_aead(cc, req, base_bio);
1288 else
1289 crypt_free_req_skcipher(cc, req, base_bio);
1290}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
1295static int crypt_convert(struct crypt_config *cc,
1296 struct convert_context *ctx)
1297{
1298 unsigned int tag_offset = 0;
1299 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1300 int r;
1301
1302 atomic_set(&ctx->cc_pending, 1);
1303
1304 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1305
1306 crypt_alloc_req(cc, ctx);
1307 atomic_inc(&ctx->cc_pending);
1308
1309 if (crypt_integrity_aead(cc))
1310 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1311 else
1312 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1313
		switch (r) {
		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */
1319 case -EBUSY:
1320 wait_for_completion(&ctx->restart);
1321 reinit_completion(&ctx->restart);
			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */
1327 case -EINPROGRESS:
1328 ctx->r.req = NULL;
1329 ctx->cc_sector += sector_step;
1330 tag_offset++;
1331 continue;
		/*
		 * The request was already processed (synchronously).
		 */
1335 case 0:
1336 atomic_dec(&ctx->cc_pending);
1337 ctx->cc_sector += sector_step;
1338 tag_offset++;
1339 cond_resched();
1340 continue;
		/*
		 * There was a data integrity error.
		 */
1344 case -EBADMSG:
1345 atomic_dec(&ctx->cc_pending);
1346 return -EILSEQ;
		/*
		 * There was an error while processing the request.
		 */
1350 default:
1351 atomic_dec(&ctx->cc_pending);
1352 return -EIO;
1353 }
1354 }
1355
1356 return 0;
1357}
1358
1359static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure we fall back
 * to blocking allocations with a mutex.
 */
1378static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1379{
1380 struct crypt_config *cc = io->cc;
1381 struct bio *clone;
1382 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1383 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1384 unsigned i, len, remaining_size;
1385 struct page *page;
1386
1387retry:
1388 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1389 mutex_lock(&cc->bio_alloc_lock);
1390
1391 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
1392 if (!clone)
1393 goto out;
1394
1395 clone_init(io, clone);
1396
1397 remaining_size = size;
1398
1399 for (i = 0; i < nr_iovecs; i++) {
1400 page = mempool_alloc(cc->page_pool, gfp_mask);
1401 if (!page) {
1402 crypt_free_buffer_pages(cc, clone);
1403 bio_put(clone);
1404 gfp_mask |= __GFP_DIRECT_RECLAIM;
1405 goto retry;
1406 }
1407
1408 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1409
1410 bio_add_page(clone, page, len, 0);
1411
1412 remaining_size -= len;
1413 }
1414
1415
1416 if (dm_crypt_integrity_io_alloc(io, clone)) {
1417 crypt_free_buffer_pages(cc, clone);
1418 bio_put(clone);
1419 clone = NULL;
1420 }
1421out:
1422 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1423 mutex_unlock(&cc->bio_alloc_lock);
1424
1425 return clone;
1426}
1427
1428static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1429{
1430 unsigned int i;
1431 struct bio_vec *bv;
1432
1433 bio_for_each_segment_all(bv, clone, i) {
1434 BUG_ON(!bv->bv_page);
1435 mempool_free(bv->bv_page, cc->page_pool);
1436 bv->bv_page = NULL;
1437 }
1438}
1439
1440static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1441 struct bio *bio, sector_t sector)
1442{
1443 io->cc = cc;
1444 io->base_bio = bio;
1445 io->sector = sector;
1446 io->error = 0;
1447 io->ctx.r.req = NULL;
1448 io->integrity_metadata = NULL;
1449 io->integrity_metadata_from_pool = false;
1450 atomic_set(&io->io_pending, 0);
1451}
1452
1453static void crypt_inc_pending(struct dm_crypt_io *io)
1454{
1455 atomic_inc(&io->io_pending);
1456}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
1462static void crypt_dec_pending(struct dm_crypt_io *io)
1463{
1464 struct crypt_config *cc = io->cc;
1465 struct bio *base_bio = io->base_bio;
1466 int error = io->error;
1467
1468 if (!atomic_dec_and_test(&io->io_pending))
1469 return;
1470
1471 if (io->ctx.r.req)
1472 crypt_free_req(cc, io->ctx.r.req, base_bio);
1473
1474 if (unlikely(io->integrity_metadata_from_pool))
1475 mempool_free(io->integrity_metadata, io->cc->tag_pool);
1476 else
1477 kfree(io->integrity_metadata);
1478
1479 base_bio->bi_error = error;
1480 bio_endio(base_bio);
1481}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
1500static void crypt_endio(struct bio *clone)
1501{
1502 struct dm_crypt_io *io = clone->bi_private;
1503 struct crypt_config *cc = io->cc;
1504 unsigned rw = bio_data_dir(clone);
1505 int error;

	/*
	 * free the processed pages
	 */
1510 if (rw == WRITE)
1511 crypt_free_buffer_pages(cc, clone);
1512
1513 error = clone->bi_error;
1514 bio_put(clone);
1515
1516 if (rw == READ && !error) {
1517 kcryptd_queue_crypt(io);
1518 return;
1519 }
1520
1521 if (unlikely(error))
1522 io->error = error;
1523
1524 crypt_dec_pending(io);
1525}
1526
1527static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1528{
1529 struct crypt_config *cc = io->cc;
1530
1531 clone->bi_private = io;
1532 clone->bi_end_io = crypt_endio;
1533 clone->bi_bdev = cc->dev->bdev;
1534 clone->bi_opf = io->base_bio->bi_opf;
1535}
1536
1537static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1538{
1539 struct crypt_config *cc = io->cc;
1540 struct bio *clone;

	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */
1548 clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
1549 if (!clone)
1550 return 1;
1551
1552 crypt_inc_pending(io);
1553
1554 clone_init(io, clone);
1555 clone->bi_iter.bi_sector = cc->start + io->sector;
1556
1557 if (dm_crypt_integrity_io_alloc(io, clone)) {
1558 crypt_dec_pending(io);
1559 bio_put(clone);
1560 return 1;
1561 }
1562
1563 generic_make_request(clone);
1564 return 0;
1565}
1566
1567static void kcryptd_io_read_work(struct work_struct *work)
1568{
1569 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1570
1571 crypt_inc_pending(io);
1572 if (kcryptd_io_read(io, GFP_NOIO))
1573 io->error = -ENOMEM;
1574 crypt_dec_pending(io);
1575}
1576
1577static void kcryptd_queue_read(struct dm_crypt_io *io)
1578{
1579 struct crypt_config *cc = io->cc;
1580
1581 INIT_WORK(&io->work, kcryptd_io_read_work);
1582 queue_work(cc->io_queue, &io->work);
1583}
1584
1585static void kcryptd_io_write(struct dm_crypt_io *io)
1586{
1587 struct bio *clone = io->ctx.bio_out;
1588
1589 generic_make_request(clone);
1590}
1591
1592#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1593
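/*
 * Dedicated write thread: pops encrypted bios from the rb-tree (kept sorted
 * by sector so writes go out mostly sequentially) and submits them under a
 * block plug.
 */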
1594static int dmcrypt_write(void *data)
1595{
1596 struct crypt_config *cc = data;
1597 struct dm_crypt_io *io;
1598
1599 while (1) {
1600 struct rb_root write_tree;
1601 struct blk_plug plug;
1602
1603 DECLARE_WAITQUEUE(wait, current);
1604
1605 spin_lock_irq(&cc->write_thread_wait.lock);
1606continue_locked:
1607
1608 if (!RB_EMPTY_ROOT(&cc->write_tree))
1609 goto pop_from_list;
1610
1611 set_current_state(TASK_INTERRUPTIBLE);
1612 __add_wait_queue(&cc->write_thread_wait, &wait);
1613
1614 spin_unlock_irq(&cc->write_thread_wait.lock);
1615
1616 if (unlikely(kthread_should_stop())) {
1617 set_current_state(TASK_RUNNING);
1618 remove_wait_queue(&cc->write_thread_wait, &wait);
1619 break;
1620 }
1621
1622 schedule();
1623
1624 set_current_state(TASK_RUNNING);
1625 spin_lock_irq(&cc->write_thread_wait.lock);
1626 __remove_wait_queue(&cc->write_thread_wait, &wait);
1627 goto continue_locked;
1628
1629pop_from_list:
1630 write_tree = cc->write_tree;
1631 cc->write_tree = RB_ROOT;
1632 spin_unlock_irq(&cc->write_thread_wait.lock);
1633
1634 BUG_ON(rb_parent(write_tree.rb_node));

		/*
		 * Note: we cannot walk the tree here with rb_next because
		 * the structures may be freed when kcryptd_io_write is called.
		 */
1640 blk_start_plug(&plug);
1641 do {
1642 io = crypt_io_from_node(rb_first(&write_tree));
1643 rb_erase(&io->rb_node, &write_tree);
1644 kcryptd_io_write(io);
1645 } while (!RB_EMPTY_ROOT(&write_tree));
1646 blk_finish_plug(&plug);
1647 }
1648 return 0;
1649}
1650
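/*
 * Queue an encrypted write: submit it directly when write offloading is
 * disabled (DM_CRYPT_NO_OFFLOAD, synchronous path only), otherwise insert it
 * into the sector-sorted rb-tree for the dmcrypt_write thread.
 */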
1651static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1652{
1653 struct bio *clone = io->ctx.bio_out;
1654 struct crypt_config *cc = io->cc;
1655 unsigned long flags;
1656 sector_t sector;
1657 struct rb_node **rbp, *parent;
1658
1659 if (unlikely(io->error < 0)) {
1660 crypt_free_buffer_pages(cc, clone);
1661 bio_put(clone);
1662 crypt_dec_pending(io);
1663 return;
1664 }

	/* crypt_convert should have filled the clone bio */
1667 BUG_ON(io->ctx.iter_out.bi_size);
1668
1669 clone->bi_iter.bi_sector = cc->start + io->sector;
1670
1671 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1672 generic_make_request(clone);
1673 return;
1674 }
1675
1676 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1677 rbp = &cc->write_tree.rb_node;
1678 parent = NULL;
1679 sector = io->sector;
1680 while (*rbp) {
1681 parent = *rbp;
1682 if (sector < crypt_io_from_node(parent)->sector)
1683 rbp = &(*rbp)->rb_left;
1684 else
1685 rbp = &(*rbp)->rb_right;
1686 }
1687 rb_link_node(&io->rb_node, parent, rbp);
1688 rb_insert_color(&io->rb_node, &cc->write_tree);
1689
1690 wake_up_locked(&cc->write_thread_wait);
1691 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1692}
1693
1694static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1695{
1696 struct crypt_config *cc = io->cc;
1697 struct bio *clone;
1698 int crypt_finished;
1699 sector_t sector = io->sector;
1700 int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
1705 crypt_inc_pending(io);
1706 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1707
1708 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1709 if (unlikely(!clone)) {
1710 io->error = -EIO;
1711 goto dec;
1712 }
1713
1714 io->ctx.bio_out = clone;
1715 io->ctx.iter_out = clone->bi_iter;
1716
1717 sector += bio_sectors(clone);
1718
1719 crypt_inc_pending(io);
1720 r = crypt_convert(cc, &io->ctx);
1721 if (r < 0)
1722 io->error = r;
1723 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	/* Encryption was already finished, submit io now */
1726 if (crypt_finished) {
1727 kcryptd_crypt_write_io_submit(io, 0);
1728 io->sector = sector;
1729 }
1730
1731dec:
1732 crypt_dec_pending(io);
1733}
1734
1735static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1736{
1737 crypt_dec_pending(io);
1738}
1739
1740static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1741{
1742 struct crypt_config *cc = io->cc;
1743 int r = 0;
1744
1745 crypt_inc_pending(io);
1746
1747 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1748 io->sector);
1749
1750 r = crypt_convert(cc, &io->ctx);
1751 if (r < 0)
1752 io->error = r;
1753
1754 if (atomic_dec_and_test(&io->ctx.cc_pending))
1755 kcryptd_crypt_read_done(io);
1756
1757 crypt_dec_pending(io);
1758}
1759
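/*
 * Completion callback for asynchronous crypto requests: handles the backlog
 * restart case, applies the IV post-processing hook, records errors and
 * finishes the io once the last pending request completes.
 */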
1760static void kcryptd_async_done(struct crypto_async_request *async_req,
1761 int error)
1762{
1763 struct dm_crypt_request *dmreq = async_req->data;
1764 struct convert_context *ctx = dmreq->ctx;
1765 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1766 struct crypt_config *cc = io->cc;

	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */
1773 if (error == -EINPROGRESS) {
1774 complete(&ctx->restart);
1775 return;
1776 }
1777
1778 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1779 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
1780
1781 if (error == -EBADMSG) {
1782 DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1783 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
1784 io->error = -EILSEQ;
1785 } else if (error < 0)
1786 io->error = -EIO;
1787
1788 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1789
1790 if (!atomic_dec_and_test(&ctx->cc_pending))
1791 return;
1792
1793 if (bio_data_dir(io->base_bio) == READ)
1794 kcryptd_crypt_read_done(io);
1795 else
1796 kcryptd_crypt_write_io_submit(io, 1);
1797}
1798
1799static void kcryptd_crypt(struct work_struct *work)
1800{
1801 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1802
1803 if (bio_data_dir(io->base_bio) == READ)
1804 kcryptd_crypt_read_convert(io);
1805 else
1806 kcryptd_crypt_write_convert(io);
1807}
1808
1809static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1810{
1811 struct crypt_config *cc = io->cc;
1812
1813 INIT_WORK(&io->work, kcryptd_crypt);
1814 queue_work(cc->crypt_queue, &io->work);
1815}
1816
1817static void crypt_free_tfms_aead(struct crypt_config *cc)
1818{
1819 if (!cc->cipher_tfm.tfms_aead)
1820 return;
1821
1822 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1823 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
1824 cc->cipher_tfm.tfms_aead[0] = NULL;
1825 }
1826
1827 kfree(cc->cipher_tfm.tfms_aead);
1828 cc->cipher_tfm.tfms_aead = NULL;
1829}
1830
1831static void crypt_free_tfms_skcipher(struct crypt_config *cc)
1832{
1833 unsigned i;
1834
1835 if (!cc->cipher_tfm.tfms)
1836 return;
1837
1838 for (i = 0; i < cc->tfms_count; i++)
1839 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
1840 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
1841 cc->cipher_tfm.tfms[i] = NULL;
1842 }
1843
1844 kfree(cc->cipher_tfm.tfms);
1845 cc->cipher_tfm.tfms = NULL;
1846}
1847
1848static void crypt_free_tfms(struct crypt_config *cc)
1849{
1850 if (crypt_integrity_aead(cc))
1851 crypt_free_tfms_aead(cc);
1852 else
1853 crypt_free_tfms_skcipher(cc);
1854}
1855
1856static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
1857{
1858 unsigned i;
1859 int err;
1860
1861 cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
1862 sizeof(struct crypto_skcipher *), GFP_KERNEL);
1863 if (!cc->cipher_tfm.tfms)
1864 return -ENOMEM;
1865
1866 for (i = 0; i < cc->tfms_count; i++) {
1867 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
1868 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
1869 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
1870 crypt_free_tfms(cc);
1871 return err;
1872 }
1873 }
1874
1875 return 0;
1876}
1877
1878static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
1879{
1880 int err;
1881
1882 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
1883 if (!cc->cipher_tfm.tfms)
1884 return -ENOMEM;
1885
1886 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
1887 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1888 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
1889 crypt_free_tfms(cc);
1890 return err;
1891 }
1892
1893 return 0;
1894}
1895
1896static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1897{
1898 if (crypt_integrity_aead(cc))
1899 return crypt_alloc_tfms_aead(cc, ciphermode);
1900 else
1901 return crypt_alloc_tfms_skcipher(cc, ciphermode);
1902}
1903
1904static unsigned crypt_subkey_size(struct crypt_config *cc)
1905{
1906 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1907}
1908
1909static unsigned crypt_authenckey_size(struct crypt_config *cc)
1910{
1911 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
1912}

/*
 * If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must be for some reason in special format.
 * This function converts cc->key to this special format.
 */
1919static void crypt_copy_authenckey(char *p, const void *key,
1920 unsigned enckeylen, unsigned authkeylen)
1921{
1922 struct crypto_authenc_key_param *param;
1923 struct rtattr *rta;
1924
1925 rta = (struct rtattr *)p;
1926 param = RTA_DATA(rta);
1927 param->enckeylen = cpu_to_be32(enckeylen);
1928 rta->rta_len = RTA_LENGTH(sizeof(*param));
1929 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1930 p += RTA_SPACE(sizeof(*param));
1931 memcpy(p, key + enckeylen, authkeylen);
1932 p += authkeylen;
1933 memcpy(p, key, enckeylen);
1934}
1935
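/*
 * Program the key(s) into the allocated tfms: the key buffer is split into
 * tfms_count subkeys (extra IV/whitening material excluded); for HMAC-based
 * AEAD modes the key is first repacked into the authenc() format.
 */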
1936static int crypt_setkey(struct crypt_config *cc)
1937{
1938 unsigned subkey_size;
1939 int err = 0, i, r;

	/* Ignore extra keys (which are used for IV etc) */
1942 subkey_size = crypt_subkey_size(cc);
1943
1944 if (crypt_integrity_hmac(cc))
1945 crypt_copy_authenckey(cc->authenc_key, cc->key,
1946 subkey_size - cc->key_mac_size,
1947 cc->key_mac_size);
1948 for (i = 0; i < cc->tfms_count; i++) {
1949 if (crypt_integrity_hmac(cc))
1950 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1951 cc->authenc_key, crypt_authenckey_size(cc));
1952 else if (crypt_integrity_aead(cc))
1953 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1954 cc->key + (i * subkey_size),
1955 subkey_size);
1956 else
1957 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
1958 cc->key + (i * subkey_size),
1959 subkey_size);
1960 if (r)
1961 err = r;
1962 }
1963
1964 if (crypt_integrity_hmac(cc))
1965 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
1966
1967 return err;
1968}
1969
1970#ifdef CONFIG_KEYS
1971
1972static bool contains_whitespace(const char *str)
1973{
1974 while (*str)
1975 if (isspace(*str++))
1976 return true;
1977 return false;
1978}
1979
1980static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
1981{
1982 char *new_key_string, *key_desc;
1983 int ret;
1984 struct key *key;
1985 const struct user_key_payload *ukp;

	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */
1991 if (contains_whitespace(key_string)) {
1992 DMERR("whitespace chars not allowed in key string");
1993 return -EINVAL;
1994 }
1995
	/* look for next ':' separating key_type from key_description */
1997 key_desc = strpbrk(key_string, ":");
1998 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
1999 return -EINVAL;
2000
2001 if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
2002 strncmp(key_string, "user:", key_desc - key_string + 1))
2003 return -EINVAL;
2004
2005 new_key_string = kstrdup(key_string, GFP_KERNEL);
2006 if (!new_key_string)
2007 return -ENOMEM;
2008
2009 key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
2010 key_desc + 1, NULL);
2011 if (IS_ERR(key)) {
2012 kzfree(new_key_string);
2013 return PTR_ERR(key);
2014 }
2015
2016 down_read(&key->sem);
2017
2018 ukp = user_key_payload_locked(key);
2019 if (!ukp) {
2020 up_read(&key->sem);
2021 key_put(key);
2022 kzfree(new_key_string);
2023 return -EKEYREVOKED;
2024 }
2025
2026 if (cc->key_size != ukp->datalen) {
2027 up_read(&key->sem);
2028 key_put(key);
2029 kzfree(new_key_string);
2030 return -EINVAL;
2031 }
2032
2033 memcpy(cc->key, ukp->data, cc->key_size);
2034
2035 up_read(&key->sem);
2036 key_put(key);
2037
2038
2039 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2040
2041 ret = crypt_setkey(cc);
2042
2043
2044 memset(cc->key, 0, cc->key_size * sizeof(u8));
2045
2046 if (!ret) {
2047 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2048 kzfree(cc->key_string);
2049 cc->key_string = new_key_string;
2050 } else
2051 kzfree(new_key_string);
2052
2053 return ret;
2054}
2055
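/*
 * For a hex key the size is half the string length; for a kernel keyring
 * reference the string looks like ":<size>:<type>:<description>" and the
 * size is parsed from the field after the leading ':'.
 */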
2056static int get_key_size(char **key_string)
2057{
2058 char *colon, dummy;
2059 int ret;
2060
2061 if (*key_string[0] != ':')
2062 return strlen(*key_string) >> 1;
2063
2064
2065 colon = strpbrk(*key_string + 1, ":");
2066 if (!colon)
2067 return -EINVAL;
2068
2069 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2070 return -EINVAL;
2071
2072 *key_string = colon;
2073
2074
2075
2076 return ret;
2077}
2078
2079#else
2080
2081static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2082{
2083 return -EINVAL;
2084}
2085
2086static int get_key_size(char **key_string)
2087{
2088 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2089}
2090
2091#endif
2092
2093static int crypt_set_key(struct crypt_config *cc, char *key)
2094{
2095 int r = -EINVAL;
2096 int key_string_len = strlen(key);

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
2103 if (key[0] == ':') {
2104 r = crypt_set_keyring_key(cc, key + 1);
2105 goto out;
2106 }
2107
2108
2109 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2110
2111
2112 kzfree(cc->key_string);
2113 cc->key_string = NULL;
2114
2115
2116 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2117 goto out;
2118
2119 r = crypt_setkey(cc);
2120 if (!r)
2121 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2122
2123out:
2124
2125 memset(key, '0', key_string_len);
2126
2127 return r;
2128}
2129
2130static int crypt_wipe_key(struct crypt_config *cc)
2131{
2132 int r;
2133
2134 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2135 get_random_bytes(&cc->key, cc->key_size);
2136 kzfree(cc->key_string);
2137 cc->key_string = NULL;
2138 r = crypt_setkey(cc);
2139 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2140
2141 return r;
2142}
2143
2144static void crypt_dtr(struct dm_target *ti)
2145{
2146 struct crypt_config *cc = ti->private;
2147
2148 ti->private = NULL;
2149
2150 if (!cc)
2151 return;
2152
2153 if (cc->write_thread)
2154 kthread_stop(cc->write_thread);
2155
2156 if (cc->io_queue)
2157 destroy_workqueue(cc->io_queue);
2158 if (cc->crypt_queue)
2159 destroy_workqueue(cc->crypt_queue);
2160
2161 crypt_free_tfms(cc);
2162
2163 if (cc->bs)
2164 bioset_free(cc->bs);
2165
2166 mempool_destroy(cc->page_pool);
2167 mempool_destroy(cc->req_pool);
2168 mempool_destroy(cc->tag_pool);
2169
2170 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2171 cc->iv_gen_ops->dtr(cc);
2172
2173 if (cc->dev)
2174 dm_put_device(ti, cc->dev);
2175
2176 kzfree(cc->cipher);
2177 kzfree(cc->cipher_string);
2178 kzfree(cc->key_string);
2179 kzfree(cc->cipher_auth);
2180 kzfree(cc->authenc_key);
2181
2182
2183 kzfree(cc);
2184}
2185
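/*
 * Pick the IV generator named in the mapping table and fix up iv_size,
 * key_parts and key_extra_size for modes (lmk, tcw) that fold extra
 * material into the key string.
 */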
2186static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2187{
2188 struct crypt_config *cc = ti->private;
2189
2190 if (crypt_integrity_aead(cc))
2191 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2192 else
2193 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2194
2195 if (cc->iv_size)
2196
2197 cc->iv_size = max(cc->iv_size,
2198 (unsigned int)(sizeof(u64) / sizeof(u8)));
2199 else if (ivmode) {
2200 DMWARN("Selected cipher does not support IVs");
2201 ivmode = NULL;
2202 }
2203
2204
2205 if (ivmode == NULL)
2206 cc->iv_gen_ops = NULL;
2207 else if (strcmp(ivmode, "plain") == 0)
2208 cc->iv_gen_ops = &crypt_iv_plain_ops;
2209 else if (strcmp(ivmode, "plain64") == 0)
2210 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2211 else if (strcmp(ivmode, "essiv") == 0)
2212 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2213 else if (strcmp(ivmode, "benbi") == 0)
2214 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2215 else if (strcmp(ivmode, "null") == 0)
2216 cc->iv_gen_ops = &crypt_iv_null_ops;
2217 else if (strcmp(ivmode, "lmk") == 0) {
2218 cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are in LMK_SEED_SIZE.
		 */
2225 if (cc->key_size % cc->key_parts) {
2226 cc->key_parts++;
2227 cc->key_extra_size = cc->key_size / cc->key_parts;
2228 }
2229 } else if (strcmp(ivmode, "tcw") == 0) {
2230 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2231 cc->key_parts += 2;
2232 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2233 } else if (strcmp(ivmode, "random") == 0) {
2234 cc->iv_gen_ops = &crypt_iv_random_ops;
2235
2236 cc->integrity_iv_size = cc->iv_size;
2237 } else {
2238 ti->error = "Invalid IV mode";
2239 return -EINVAL;
2240 }
2241
2242 return 0;
2243}
2244
2245
2246
2247
2248
2249
2250static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
2251{
2252 const char *alg_name = NULL;
2253 char *start, *end;
2254
2255 if (crypt_integrity_aead(cc)) {
2256 alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
2257 if (!alg_name)
2258 return -EINVAL;
2259 if (crypt_integrity_hmac(cc)) {
2260 alg_name = strchr(alg_name, ',');
2261 if (!alg_name)
2262 return -EINVAL;
2263 }
2264 alg_name++;
2265 } else {
2266 alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
2267 if (!alg_name)
2268 return -EINVAL;
2269 }
2270
2271 start = strchr(alg_name, '(');
2272 end = strchr(alg_name, ')');
2273
2274 if (!start && !end) {
2275 cc->cipher = kstrdup(alg_name, GFP_KERNEL);
2276 return cc->cipher ? 0 : -ENOMEM;
2277 }
2278
2279 if (!start || !end || ++start >= end)
2280 return -EINVAL;
2281
2282 cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
2283 if (!cc->cipher)
2284 return -ENOMEM;
2285
2286 strncpy(cc->cipher, start, end - start);
2287
2288 return 0;
2289}
2290
2291
2292
2293
2294
2295
2296static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2297{
2298 char *start, *end, *mac_alg = NULL;
2299 struct crypto_ahash *mac;
2300
2301 if (!strstarts(cipher_api, "authenc("))
2302 return 0;
2303
2304 start = strchr(cipher_api, '(');
2305 end = strchr(cipher_api, ',');
2306 if (!start || !end || ++start > end)
2307 return -EINVAL;
2308
2309 mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
2310 if (!mac_alg)
2311 return -ENOMEM;
2312 strncpy(mac_alg, start, end - start);
2313
2314 mac = crypto_alloc_ahash(mac_alg, 0, 0);
2315 kfree(mac_alg);
2316
2317 if (IS_ERR(mac))
2318 return PTR_ERR(mac);
2319
2320 cc->key_mac_size = crypto_ahash_digestsize(mac);
2321 crypto_free_ahash(mac);
2322
2323 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2324 if (!cc->authenc_key)
2325 return -ENOMEM;
2326
2327 return 0;
2328}
2329
static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api;
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/* Skip the "capi:" prefix and split off the IV mode and options. */
	tmp = &cipher_in[strlen("capi:")];
	cipher_api = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/* The lmk IV mode is a multi-key mode and needs 64 transforms. */
	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	} else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	ret = crypt_ctr_blkdev_cipher(cc);
	if (ret < 0) {
		ti->error = "Cannot allocate cipher string";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification:
	 *   cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	*ivopts = strsep(&tmp, "-");
	*ivmode = strsep(&*ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

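/*
 * Accepts either the legacy cipher specification or the crypto API
 * ("capi:" prefixed) form.  Illustrative examples, not an exhaustive list:
 *   legacy:  aes-xts-plain64, aes:64-cbc-lmk
 *   capi:    capi:xts(aes)-plain64, capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256
 */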
static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Select the IV generation mode */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Decode and set the key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV generator private data */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize the IV generator (e.g. derive the ESSIV salt from the key) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	return ret;
}

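/*
 * Parse the optional feature arguments.  The features recognised below are:
 * allow_discards, same_cpu_crypt, submit_from_crypt_cpus,
 * integrity:<bytes>:<type>, sector_size:<bytes> and iv_large_sectors.
 */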
static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}

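/*
 * Construct an encryption mapping:
 *   <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
 *
 * Illustrative table line (device, sizes and key below are placeholder
 * values, not taken from this code):
 *   0 417792 crypt aes-xts-plain64 <64 hex digits> 0 /dev/sdb 0 1 allow_discards
 */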
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	ret = -ENOMEM;

	/* ...| dm_crypt_request | IV + padding | original IV | original sector | bio tag offset |... */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	/* Per-bio data holds struct dm_crypt_io followed by the crypto request. */
	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (!cc->tag_pool) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		/* Convert from crypt sectors to 512-byte sectors (as used by bio_sectors()). */
		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

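/*
 * Map an incoming bio: empty flushes and discards are remapped to the
 * underlying device directly; all other bios are queued for encryption
 * or decryption on the dm-crypt workqueues.
 */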
static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return -EIO;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return -EIO;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		/* Fall back to the tag mempool (and a smaller bio) if kmalloc fails. */
		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	/* The crypto request lives in the per-bio data, right after dm_crypt_io. */
	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

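/*
 * Message interface:
 *	key set <key>
 *	key wipe
 *
 * Illustrative usage (device name is a placeholder):
 *   dmsetup message crypt_dev 0 key wipe
 */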
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		limits->logical_block_size = cc->sector_size;
		limits->physical_block_size = cc->sector_size;
		blk_limits_io_min(limits, cc->sector_size);
	}
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 17, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");