/*
 * dm-crypt: transparent encryption target for device-mapper.
 *
 * Maps a linear range of a block device and encrypts/decrypts the data
 * on the fly, optionally with per-sector authentication tags.
 *
 * This file is released under the GPL.
 */
10#include <linux/completion.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/key.h>
16#include <linux/bio.h>
17#include <linux/blkdev.h>
18#include <linux/mempool.h>
19#include <linux/slab.h>
20#include <linux/crypto.h>
21#include <linux/workqueue.h>
22#include <linux/kthread.h>
23#include <linux/backing-dev.h>
24#include <linux/atomic.h>
25#include <linux/scatterlist.h>
26#include <linux/rbtree.h>
27#include <linux/ctype.h>
28#include <asm/page.h>
29#include <asm/unaligned.h>
30#include <crypto/hash.h>
31#include <crypto/md5.h>
32#include <crypto/algapi.h>
33#include <crypto/skcipher.h>
34#include <crypto/aead.h>
35#include <crypto/authenc.h>
36#include <linux/rtnetlink.h>
37#include <keys/user-type.h>
38
39#include <linux/device-mapper.h>
40
41#define DM_MSG_PREFIX "crypt"

/*
 * context holding the current state of a multi-part conversion
 */
46struct convert_context {
47 struct completion restart;
48 struct bio *bio_in;
49 struct bio *bio_out;
50 struct bvec_iter iter_in;
51 struct bvec_iter iter_out;
52 sector_t cc_sector;
53 atomic_t cc_pending;
54 union {
55 struct skcipher_request *req;
56 struct aead_request *req_aead;
57 } r;
58
59};
60
/*
 * per bio private data
 */
64struct dm_crypt_io {
65 struct crypt_config *cc;
66 struct bio *base_bio;
67 u8 *integrity_metadata;
68 bool integrity_metadata_from_pool;
69 struct work_struct work;
70
71 struct convert_context ctx;
72
73 atomic_t io_pending;
74 blk_status_t error;
75 sector_t sector;
76
77 struct rb_node rb_node;
78} CRYPTO_MINALIGN_ATTR;
79
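/*
 * State attached to every in-flight crypto request: a back-pointer to the
 * conversion context, the sector number used for IV generation, and the
 * scatterlists describing one sector of input/output data (plus AAD and
 * auth tag entries for AEAD modes).
 */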
80struct dm_crypt_request {
81 struct convert_context *ctx;
82 struct scatterlist sg_in[4];
83 struct scatterlist sg_out[4];
84 sector_t iv_sector;
85};
86
87struct crypt_config;
88
89struct crypt_iv_operations {
90 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
91 const char *opts);
92 void (*dtr)(struct crypt_config *cc);
93 int (*init)(struct crypt_config *cc);
94 int (*wipe)(struct crypt_config *cc);
95 int (*generator)(struct crypt_config *cc, u8 *iv,
96 struct dm_crypt_request *dmreq);
97 int (*post)(struct crypt_config *cc, u8 *iv,
98 struct dm_crypt_request *dmreq);
99};
100
101struct iv_essiv_private {
102 struct crypto_ahash *hash_tfm;
103 u8 *salt;
104};
105
106struct iv_benbi_private {
107 int shift;
108};
109
110#define LMK_SEED_SIZE 64
111struct iv_lmk_private {
112 struct crypto_shash *hash_tfm;
113 u8 *seed;
114};
115
116#define TCW_WHITENING_SIZE 16
117struct iv_tcw_private {
118 struct crypto_shash *crc32_tfm;
119 u8 *iv_seed;
120 u8 *whitening;
121};
122
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
127enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
128 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
129
130enum cipher_flags {
131 CRYPT_MODE_INTEGRITY_AEAD,
132 CRYPT_IV_LARGE_SECTORS,
133};
134
/*
 * The fields in here must be read only after initialization.
 */
138struct crypt_config {
139 struct dm_dev *dev;
140 sector_t start;
141
 /*
  * pool for per bio private data, crypto requests,
  * encryption requests/buffer pages and integrity tags
  */
146 mempool_t *req_pool;
147 mempool_t *page_pool;
148 mempool_t *tag_pool;
149 unsigned tag_pool_max_sectors;
150
151 struct bio_set *bs;
152 struct mutex bio_alloc_lock;
153
154 struct workqueue_struct *io_queue;
155 struct workqueue_struct *crypt_queue;
156
157 struct task_struct *write_thread;
158 wait_queue_head_t write_thread_wait;
159 struct rb_root write_tree;
160
161 char *cipher;
162 char *cipher_string;
163 char *cipher_auth;
164 char *key_string;
165
166 const struct crypt_iv_operations *iv_gen_ops;
167 union {
168 struct iv_essiv_private essiv;
169 struct iv_benbi_private benbi;
170 struct iv_lmk_private lmk;
171 struct iv_tcw_private tcw;
172 } iv_gen_private;
173 sector_t iv_offset;
174 unsigned int iv_size;
175 unsigned short int sector_size;
176 unsigned char sector_shift;
177
178
179 void *iv_private;
180 union {
181 struct crypto_skcipher **tfms;
182 struct crypto_aead **tfms_aead;
183 } cipher_tfm;
184 unsigned tfms_count;
185 unsigned long cipher_flags;
186
 /*
  * Layout of each crypto request:
  *
  *   struct skcipher_request
  *      context
  *      padding
  *   struct dm_crypt_request
  *      padding
  *   IV
  *
  * The padding is added so that dm_crypt_request and the IV are
  * correctly aligned.
  */
200 unsigned int dmreq_start;
201
202 unsigned int per_bio_data_size;
203
204 unsigned long flags;
205 unsigned int key_size;
206 unsigned int key_parts;
207 unsigned int key_extra_size;
208 unsigned int key_mac_size;
209
210 unsigned int integrity_tag_size;
211 unsigned int integrity_iv_size;
212 unsigned int on_disk_tag_size;
213
214 u8 *authenc_key;
215 u8 key[0];
216};
217
218#define MIN_IOS 64
219#define MAX_TAG_SIZE 480
220#define POOL_ENTRY_SIZE 512
221
222static void clone_init(struct dm_crypt_io *, struct bio *);
223static void kcryptd_queue_crypt(struct dm_crypt_io *io);
224static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
225 struct scatterlist *sg);
226
/*
 * Use this to access cipher attributes that are independent of the key.
 */
230static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
231{
232 return cc->cipher_tfm.tfms[0];
233}
234
235static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
236{
237 return cc->cipher_tfm.tfms_aead[0];
238}
239
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes).
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop devices. Do not use for new devices.
 *
 * lmk: compatible implementation of the block chaining mode used by the
 *      Loop-AES block device encryption system. It operates on full
 *      512 byte sectors and uses CBC with an IV derived from the sector
 *      number, the sector data and optionally an extra IV seed.
 *
 * tcw: compatible implementation of the block chaining mode used by the
 *      TrueCrypt device encryption system (prior to version 4.1).
 *      It operates on full 512 byte sectors and uses CBC with an IV
 *      derived from the initial key and the sector number. In addition,
 *      a whitening value derived from the key, the sector number and
 *      CRC32 is applied to every sector.
 *
 * random: the initial vector is generated randomly for every request and
 *         stored in the per-sector integrity metadata (AEAD modes only).
 */
290static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
291 struct dm_crypt_request *dmreq)
292{
293 memset(iv, 0, cc->iv_size);
294 *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
295
296 return 0;
297}
298
299static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
300 struct dm_crypt_request *dmreq)
301{
302 memset(iv, 0, cc->iv_size);
303 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
304
305 return 0;
306}
307
308static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
309 struct dm_crypt_request *dmreq)
310{
311 memset(iv, 0, cc->iv_size);
312
313 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
314
315 return 0;
316}
317
318
319static int crypt_iv_essiv_init(struct crypt_config *cc)
320{
321 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
322 AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
323 struct scatterlist sg;
324 struct crypto_cipher *essiv_tfm;
325 int err;
326
327 sg_init_one(&sg, cc->key, cc->key_size);
328 ahash_request_set_tfm(req, essiv->hash_tfm);
329 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
330 ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
331
332 err = crypto_ahash_digest(req);
333 ahash_request_zero(req);
334 if (err)
335 return err;
336
337 essiv_tfm = cc->iv_private;
338
339 err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
340 crypto_ahash_digestsize(essiv->hash_tfm));
341 if (err)
342 return err;
343
344 return 0;
345}
346
347
348static int crypt_iv_essiv_wipe(struct crypt_config *cc)
349{
350 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
351 unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
352 struct crypto_cipher *essiv_tfm;
353 int r, err = 0;
354
355 memset(essiv->salt, 0, salt_size);
356
357 essiv_tfm = cc->iv_private;
358 r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
359 if (r)
360 err = r;
361
362 return err;
363}
364
365
366static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
367 struct dm_target *ti,
368 const u8 *salt,
369 unsigned int saltsize)
370{
371 struct crypto_cipher *essiv_tfm;
372 int err;
373
374
375 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
376 if (IS_ERR(essiv_tfm)) {
377 ti->error = "Error allocating crypto tfm for ESSIV";
378 return essiv_tfm;
379 }
380
381 if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
382 ti->error = "Block size of ESSIV cipher does "
383 "not match IV size of block cipher";
384 crypto_free_cipher(essiv_tfm);
385 return ERR_PTR(-EINVAL);
386 }
387
388 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
389 if (err) {
390 ti->error = "Failed to set key for ESSIV cipher";
391 crypto_free_cipher(essiv_tfm);
392 return ERR_PTR(err);
393 }
394
395 return essiv_tfm;
396}
397
398static void crypt_iv_essiv_dtr(struct crypt_config *cc)
399{
400 struct crypto_cipher *essiv_tfm;
401 struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
402
403 crypto_free_ahash(essiv->hash_tfm);
404 essiv->hash_tfm = NULL;
405
406 kzfree(essiv->salt);
407 essiv->salt = NULL;
408
409 essiv_tfm = cc->iv_private;
410
411 if (essiv_tfm)
412 crypto_free_cipher(essiv_tfm);
413
414 cc->iv_private = NULL;
415}
416
417static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
418 const char *opts)
419{
420 struct crypto_cipher *essiv_tfm = NULL;
421 struct crypto_ahash *hash_tfm = NULL;
422 u8 *salt = NULL;
423 int err;
424
425 if (!opts) {
426 ti->error = "Digest algorithm missing for ESSIV mode";
427 return -EINVAL;
428 }
429
430
431 hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
432 if (IS_ERR(hash_tfm)) {
433 ti->error = "Error initializing ESSIV hash";
434 err = PTR_ERR(hash_tfm);
435 goto bad;
436 }
437
438 salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
439 if (!salt) {
440 ti->error = "Error kmallocing salt storage in ESSIV";
441 err = -ENOMEM;
442 goto bad;
443 }
444
445 cc->iv_gen_private.essiv.salt = salt;
446 cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
447
448 essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
449 crypto_ahash_digestsize(hash_tfm));
450 if (IS_ERR(essiv_tfm)) {
451 crypt_iv_essiv_dtr(cc);
452 return PTR_ERR(essiv_tfm);
453 }
454 cc->iv_private = essiv_tfm;
455
456 return 0;
457
458bad:
459 if (hash_tfm && !IS_ERR(hash_tfm))
460 crypto_free_ahash(hash_tfm);
461 kfree(salt);
462 return err;
463}
464
465static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
466 struct dm_crypt_request *dmreq)
467{
468 struct crypto_cipher *essiv_tfm = cc->iv_private;
469
470 memset(iv, 0, cc->iv_size);
471 *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
472 crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
473
474 return 0;
475}
476
477static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
478 const char *opts)
479{
480 unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
481 int log = ilog2(bs);
482
483
484
485
486 if (1 << log != bs) {
487 ti->error = "cypher blocksize is not a power of 2";
488 return -EINVAL;
489 }
490
491 if (log > 9) {
492 ti->error = "cypher blocksize is > 512";
493 return -EINVAL;
494 }
495
496 cc->iv_gen_private.benbi.shift = 9 - log;
497
498 return 0;
499}
500
501static void crypt_iv_benbi_dtr(struct crypt_config *cc)
502{
503}
504
505static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
506 struct dm_crypt_request *dmreq)
507{
508 __be64 val;
509
510 memset(iv, 0, cc->iv_size - sizeof(u64));
511
512 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
513 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
514
515 return 0;
516}
517
518static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
519 struct dm_crypt_request *dmreq)
520{
521 memset(iv, 0, cc->iv_size);
522
523 return 0;
524}
525
526static void crypt_iv_lmk_dtr(struct crypt_config *cc)
527{
528 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
529
530 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
531 crypto_free_shash(lmk->hash_tfm);
532 lmk->hash_tfm = NULL;
533
534 kzfree(lmk->seed);
535 lmk->seed = NULL;
536}
537
538static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
539 const char *opts)
540{
541 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
542
543 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
544 ti->error = "Unsupported sector size for LMK";
545 return -EINVAL;
546 }
547
548 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
549 if (IS_ERR(lmk->hash_tfm)) {
550 ti->error = "Error initializing LMK hash";
551 return PTR_ERR(lmk->hash_tfm);
552 }
553
554
555 if (cc->key_parts == cc->tfms_count) {
556 lmk->seed = NULL;
557 return 0;
558 }
559
560 lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
561 if (!lmk->seed) {
562 crypt_iv_lmk_dtr(cc);
563 ti->error = "Error kmallocing seed storage in LMK";
564 return -ENOMEM;
565 }
566
567 return 0;
568}
569
570static int crypt_iv_lmk_init(struct crypt_config *cc)
571{
572 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
573 int subkey_size = cc->key_size / cc->key_parts;
574
575
576 if (lmk->seed)
577 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
578 crypto_shash_digestsize(lmk->hash_tfm));
579
580 return 0;
581}
582
583static int crypt_iv_lmk_wipe(struct crypt_config *cc)
584{
585 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
586
587 if (lmk->seed)
588 memset(lmk->seed, 0, LMK_SEED_SIZE);
589
590 return 0;
591}
592
593static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
594 struct dm_crypt_request *dmreq,
595 u8 *data)
596{
597 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
598 SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
599 struct md5_state md5state;
600 __le32 buf[4];
601 int i, r;
602
603 desc->tfm = lmk->hash_tfm;
604 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
605
606 r = crypto_shash_init(desc);
607 if (r)
608 return r;
609
610 if (lmk->seed) {
611 r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
612 if (r)
613 return r;
614 }
615
616
617 r = crypto_shash_update(desc, data + 16, 16 * 31);
618 if (r)
619 return r;
620
621
622 buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
623 buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
624 buf[2] = cpu_to_le32(4024);
625 buf[3] = 0;
626 r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
627 if (r)
628 return r;
629
630
631 r = crypto_shash_export(desc, &md5state);
632 if (r)
633 return r;
634
635 for (i = 0; i < MD5_HASH_WORDS; i++)
636 __cpu_to_le32s(&md5state.hash[i]);
637 memcpy(iv, &md5state.hash, cc->iv_size);
638
639 return 0;
640}
641
642static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
643 struct dm_crypt_request *dmreq)
644{
645 struct scatterlist *sg;
646 u8 *src;
647 int r = 0;
648
649 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
650 sg = crypt_get_sg_data(cc, dmreq->sg_in);
651 src = kmap_atomic(sg_page(sg));
652 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
653 kunmap_atomic(src);
654 } else
655 memset(iv, 0, cc->iv_size);
656
657 return r;
658}
659
660static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
661 struct dm_crypt_request *dmreq)
662{
663 struct scatterlist *sg;
664 u8 *dst;
665 int r;
666
667 if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
668 return 0;
669
670 sg = crypt_get_sg_data(cc, dmreq->sg_out);
671 dst = kmap_atomic(sg_page(sg));
672 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
673
674
675 if (!r)
676 crypto_xor(dst + sg->offset, iv, cc->iv_size);
677
678 kunmap_atomic(dst);
679 return r;
680}
681
682static void crypt_iv_tcw_dtr(struct crypt_config *cc)
683{
684 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
685
686 kzfree(tcw->iv_seed);
687 tcw->iv_seed = NULL;
688 kzfree(tcw->whitening);
689 tcw->whitening = NULL;
690
691 if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
692 crypto_free_shash(tcw->crc32_tfm);
693 tcw->crc32_tfm = NULL;
694}
695
696static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
697 const char *opts)
698{
699 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
700
701 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
702 ti->error = "Unsupported sector size for TCW";
703 return -EINVAL;
704 }
705
706 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
707 ti->error = "Wrong key size for TCW";
708 return -EINVAL;
709 }
710
711 tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
712 if (IS_ERR(tcw->crc32_tfm)) {
713 ti->error = "Error initializing CRC32 in TCW";
714 return PTR_ERR(tcw->crc32_tfm);
715 }
716
717 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
718 tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
719 if (!tcw->iv_seed || !tcw->whitening) {
720 crypt_iv_tcw_dtr(cc);
721 ti->error = "Error allocating seed storage in TCW";
722 return -ENOMEM;
723 }
724
725 return 0;
726}
727
728static int crypt_iv_tcw_init(struct crypt_config *cc)
729{
730 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
731 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
732
733 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
734 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
735 TCW_WHITENING_SIZE);
736
737 return 0;
738}
739
740static int crypt_iv_tcw_wipe(struct crypt_config *cc)
741{
742 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
743
744 memset(tcw->iv_seed, 0, cc->iv_size);
745 memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
746
747 return 0;
748}
749
750static int crypt_iv_tcw_whitening(struct crypt_config *cc,
751 struct dm_crypt_request *dmreq,
752 u8 *data)
753{
754 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
755 __le64 sector = cpu_to_le64(dmreq->iv_sector);
756 u8 buf[TCW_WHITENING_SIZE];
757 SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
758 int i, r;
759
 /* xor whitening with sector number */
 crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
 crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
763
764
765 desc->tfm = tcw->crc32_tfm;
766 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
767 for (i = 0; i < 4; i++) {
768 r = crypto_shash_init(desc);
769 if (r)
770 goto out;
771 r = crypto_shash_update(desc, &buf[i * 4], 4);
772 if (r)
773 goto out;
774 r = crypto_shash_final(desc, &buf[i * 4]);
775 if (r)
776 goto out;
777 }
778 crypto_xor(&buf[0], &buf[12], 4);
779 crypto_xor(&buf[4], &buf[8], 4);
780
781
782 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
783 crypto_xor(data + i * 8, buf, 8);
784out:
785 memzero_explicit(buf, sizeof(buf));
786 return r;
787}
788
789static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
790 struct dm_crypt_request *dmreq)
791{
792 struct scatterlist *sg;
793 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
794 __le64 sector = cpu_to_le64(dmreq->iv_sector);
795 u8 *src;
796 int r = 0;
797
 /* Remove whitening from ciphertext */
799 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
800 sg = crypt_get_sg_data(cc, dmreq->sg_in);
801 src = kmap_atomic(sg_page(sg));
802 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
803 kunmap_atomic(src);
804 }
805
 /* Calculate IV */
 crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
 if (cc->iv_size > 8)
  crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
          cc->iv_size - 8);
811
812 return r;
813}
814
815static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
816 struct dm_crypt_request *dmreq)
817{
818 struct scatterlist *sg;
819 u8 *dst;
820 int r;
821
822 if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
823 return 0;
824
825
826 sg = crypt_get_sg_data(cc, dmreq->sg_out);
827 dst = kmap_atomic(sg_page(sg));
828 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
829 kunmap_atomic(dst);
830
831 return r;
832}
833
834static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
835 struct dm_crypt_request *dmreq)
836{
837
838 get_random_bytes(iv, cc->iv_size);
839 return 0;
840}
841
842static const struct crypt_iv_operations crypt_iv_plain_ops = {
843 .generator = crypt_iv_plain_gen
844};
845
846static const struct crypt_iv_operations crypt_iv_plain64_ops = {
847 .generator = crypt_iv_plain64_gen
848};
849
850static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
851 .generator = crypt_iv_plain64be_gen
852};
853
854static const struct crypt_iv_operations crypt_iv_essiv_ops = {
855 .ctr = crypt_iv_essiv_ctr,
856 .dtr = crypt_iv_essiv_dtr,
857 .init = crypt_iv_essiv_init,
858 .wipe = crypt_iv_essiv_wipe,
859 .generator = crypt_iv_essiv_gen
860};
861
862static const struct crypt_iv_operations crypt_iv_benbi_ops = {
863 .ctr = crypt_iv_benbi_ctr,
864 .dtr = crypt_iv_benbi_dtr,
865 .generator = crypt_iv_benbi_gen
866};
867
868static const struct crypt_iv_operations crypt_iv_null_ops = {
869 .generator = crypt_iv_null_gen
870};
871
872static const struct crypt_iv_operations crypt_iv_lmk_ops = {
873 .ctr = crypt_iv_lmk_ctr,
874 .dtr = crypt_iv_lmk_dtr,
875 .init = crypt_iv_lmk_init,
876 .wipe = crypt_iv_lmk_wipe,
877 .generator = crypt_iv_lmk_gen,
878 .post = crypt_iv_lmk_post
879};
880
881static const struct crypt_iv_operations crypt_iv_tcw_ops = {
882 .ctr = crypt_iv_tcw_ctr,
883 .dtr = crypt_iv_tcw_dtr,
884 .init = crypt_iv_tcw_init,
885 .wipe = crypt_iv_tcw_wipe,
886 .generator = crypt_iv_tcw_gen,
887 .post = crypt_iv_tcw_post
888};
889
890static struct crypt_iv_operations crypt_iv_random_ops = {
891 .generator = crypt_iv_random_gen
892};
893
/*
 * Integrity extensions
 */
897static bool crypt_integrity_aead(struct crypt_config *cc)
898{
899 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
900}
901
902static bool crypt_integrity_hmac(struct crypt_config *cc)
903{
904 return crypt_integrity_aead(cc) && cc->key_mac_size;
905}
906
/* Get sg containing data */
908static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
909 struct scatterlist *sg)
910{
911 if (unlikely(crypt_integrity_aead(cc)))
912 return &sg[2];
913
914 return sg;
915}
916
917static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
918{
919 struct bio_integrity_payload *bip;
920 unsigned int tag_len;
921 int ret;
922
923 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
924 return 0;
925
926 bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
927 if (IS_ERR(bip))
928 return PTR_ERR(bip);
929
930 tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
931
932 bip->bip_iter.bi_size = tag_len;
933 bip->bip_iter.bi_sector = io->cc->start + io->sector;
934
935 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
936 tag_len, offset_in_page(io->integrity_metadata));
937 if (unlikely(ret != tag_len))
938 return -ENOMEM;
939
940 return 0;
941}
942
943static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
944{
945#ifdef CONFIG_BLK_DEV_INTEGRITY
946 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
947
948
949 if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
950 ti->error = "Integrity profile not supported.";
951 return -EINVAL;
952 }
953
954 if (bi->tag_size != cc->on_disk_tag_size ||
955 bi->tuple_size != cc->on_disk_tag_size) {
956 ti->error = "Integrity profile tag size mismatch.";
957 return -EINVAL;
958 }
959 if (1 << bi->interval_exp != cc->sector_size) {
960 ti->error = "Integrity profile sector size mismatch.";
961 return -EINVAL;
962 }
963
964 if (crypt_integrity_aead(cc)) {
965 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
966 DMINFO("Integrity AEAD, tag size %u, IV size %u.",
967 cc->integrity_tag_size, cc->integrity_iv_size);
968
969 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
970 ti->error = "Integrity AEAD auth tag size is not supported.";
971 return -EINVAL;
972 }
973 } else if (cc->integrity_iv_size)
974 DMINFO("Additional per-sector space %u bytes for IV.",
975 cc->integrity_iv_size);
976
977 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
978 ti->error = "Not enough space for integrity tag in the profile.";
979 return -EINVAL;
980 }
981
982 return 0;
983#else
984 ti->error = "Integrity profile not supported.";
985 return -EINVAL;
986#endif
987}
988
989static void crypt_convert_init(struct crypt_config *cc,
990 struct convert_context *ctx,
991 struct bio *bio_out, struct bio *bio_in,
992 sector_t sector)
993{
994 ctx->bio_in = bio_in;
995 ctx->bio_out = bio_out;
996 if (bio_in)
997 ctx->iter_in = bio_in->bi_iter;
998 if (bio_out)
999 ctx->iter_out = bio_out->bi_iter;
1000 ctx->cc_sector = sector + cc->iv_offset;
1001 init_completion(&ctx->restart);
1002}
1003
1004static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1005 void *req)
1006{
1007 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1008}
1009
1010static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1011{
1012 return (void *)((char *)dmreq - cc->dmreq_start);
1013}
1014
1015static u8 *iv_of_dmreq(struct crypt_config *cc,
1016 struct dm_crypt_request *dmreq)
1017{
1018 if (crypt_integrity_aead(cc))
1019 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1020 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1021 else
1022 return (u8 *)ALIGN((unsigned long)(dmreq + 1),
1023 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1024}
1025
1026static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1027 struct dm_crypt_request *dmreq)
1028{
1029 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1030}
1031
1032static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
1033 struct dm_crypt_request *dmreq)
1034{
1035 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1036 return (uint64_t*) ptr;
1037}
1038
1039static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1040 struct dm_crypt_request *dmreq)
1041{
1042 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1043 cc->iv_size + sizeof(uint64_t);
1044 return (unsigned int*)ptr;
1045}
1046
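/*
 * Per-sector authentication tags (and optionally stored IVs) live in
 * io->integrity_metadata; the tag index recorded in the request
 * (org_tag_of_dmreq) selects the on-disk tag slot for this sector.
 */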
1047static void *tag_from_dmreq(struct crypt_config *cc,
1048 struct dm_crypt_request *dmreq)
1049{
1050 struct convert_context *ctx = dmreq->ctx;
1051 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1052
1053 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1054 cc->on_disk_tag_size];
1055}
1056
1057static void *iv_tag_from_dmreq(struct crypt_config *cc,
1058 struct dm_crypt_request *dmreq)
1059{
1060 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
1061}
1062
1063static int crypt_convert_block_aead(struct crypt_config *cc,
1064 struct convert_context *ctx,
1065 struct aead_request *req,
1066 unsigned int tag_offset)
1067{
1068 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1069 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1070 struct dm_crypt_request *dmreq;
1071 u8 *iv, *org_iv, *tag_iv, *tag;
1072 uint64_t *sector;
1073 int r = 0;
1074
1075 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1076
1077
1078 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1079 return -EIO;
1080
1081 dmreq = dmreq_of_req(cc, req);
1082 dmreq->iv_sector = ctx->cc_sector;
1083 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1084 dmreq->iv_sector >>= cc->sector_shift;
1085 dmreq->ctx = ctx;
1086
1087 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1088
1089 sector = org_sector_of_dmreq(cc, dmreq);
1090 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1091
1092 iv = iv_of_dmreq(cc, dmreq);
1093 org_iv = org_iv_of_dmreq(cc, dmreq);
1094 tag = tag_from_dmreq(cc, dmreq);
1095 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1096
 /*
  * AEAD request:
  *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
  *  | (authenticated) | (auth+encryption) |              |
  *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
  */
1102 sg_init_table(dmreq->sg_in, 4);
1103 sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
1104 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1105 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1106 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1107
1108 sg_init_table(dmreq->sg_out, 4);
1109 sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
1110 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1111 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1112 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1113
1114 if (cc->iv_gen_ops) {
1115
1116 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1117 memcpy(org_iv, tag_iv, cc->iv_size);
1118 } else {
1119 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1120 if (r < 0)
1121 return r;
1122
1123 if (cc->integrity_iv_size)
1124 memcpy(tag_iv, org_iv, cc->iv_size);
1125 }
1126
1127 memcpy(iv, org_iv, cc->iv_size);
1128 }
1129
1130 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1131 if (bio_data_dir(ctx->bio_in) == WRITE) {
1132 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1133 cc->sector_size, iv);
1134 r = crypto_aead_encrypt(req);
1135 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1136 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1137 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1138 } else {
1139 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
1140 cc->sector_size + cc->integrity_tag_size, iv);
1141 r = crypto_aead_decrypt(req);
1142 }
1143
1144 if (r == -EBADMSG)
1145 DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1146 (unsigned long long)le64_to_cpu(*sector));
1147
1148 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1149 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1150
1151 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1152 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1153
1154 return r;
1155}
1156
1157static int crypt_convert_block_skcipher(struct crypt_config *cc,
1158 struct convert_context *ctx,
1159 struct skcipher_request *req,
1160 unsigned int tag_offset)
1161{
1162 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
1163 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
1164 struct scatterlist *sg_in, *sg_out;
1165 struct dm_crypt_request *dmreq;
1166 u8 *iv, *org_iv, *tag_iv;
1167 uint64_t *sector;
1168 int r = 0;
1169
1170
1171 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1172 return -EIO;
1173
1174 dmreq = dmreq_of_req(cc, req);
1175 dmreq->iv_sector = ctx->cc_sector;
1176 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1177 dmreq->iv_sector >>= cc->sector_shift;
1178 dmreq->ctx = ctx;
1179
1180 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1181
1182 iv = iv_of_dmreq(cc, dmreq);
1183 org_iv = org_iv_of_dmreq(cc, dmreq);
1184 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1185
1186 sector = org_sector_of_dmreq(cc, dmreq);
1187 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1188
1189
1190 sg_in = &dmreq->sg_in[0];
1191 sg_out = &dmreq->sg_out[0];
1192
1193 sg_init_table(sg_in, 1);
1194 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1195
1196 sg_init_table(sg_out, 1);
1197 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1198
1199 if (cc->iv_gen_ops) {
1200
1201 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1202 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1203 } else {
1204 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1205 if (r < 0)
1206 return r;
1207
1208 if (cc->integrity_iv_size)
1209 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1210 }
1211
1212 memcpy(iv, org_iv, cc->iv_size);
1213 }
1214
1215 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1216
1217 if (bio_data_dir(ctx->bio_in) == WRITE)
1218 r = crypto_skcipher_encrypt(req);
1219 else
1220 r = crypto_skcipher_decrypt(req);
1221
1222 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1223 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1224
1225 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1226 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1227
1228 return r;
1229}
1230
1231static void kcryptd_async_done(struct crypto_async_request *async_req,
1232 int error);
1233
1234static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1235 struct convert_context *ctx)
1236{
1237 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
1238
1239 if (!ctx->r.req)
1240 ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO);
1241
1242 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1243
1244
1245
1246
1247
1248 skcipher_request_set_callback(ctx->r.req,
1249 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
1250 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1251}
1252
1253static void crypt_alloc_req_aead(struct crypt_config *cc,
1254 struct convert_context *ctx)
1255{
1256 if (!ctx->r.req_aead)
1257 ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO);
1258
1259 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1260
1261
1262
1263
1264
1265 aead_request_set_callback(ctx->r.req_aead,
1266 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
1267 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1268}
1269
1270static void crypt_alloc_req(struct crypt_config *cc,
1271 struct convert_context *ctx)
1272{
1273 if (crypt_integrity_aead(cc))
1274 crypt_alloc_req_aead(cc, ctx);
1275 else
1276 crypt_alloc_req_skcipher(cc, ctx);
1277}
1278
1279static void crypt_free_req_skcipher(struct crypt_config *cc,
1280 struct skcipher_request *req, struct bio *base_bio)
1281{
1282 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1283
1284 if ((struct skcipher_request *)(io + 1) != req)
1285 mempool_free(req, cc->req_pool);
1286}
1287
1288static void crypt_free_req_aead(struct crypt_config *cc,
1289 struct aead_request *req, struct bio *base_bio)
1290{
1291 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1292
1293 if ((struct aead_request *)(io + 1) != req)
1294 mempool_free(req, cc->req_pool);
1295}
1296
1297static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1298{
1299 if (crypt_integrity_aead(cc))
1300 crypt_free_req_aead(cc, req, base_bio);
1301 else
1302 crypt_free_req_skcipher(cc, req, base_bio);
1303}
1304
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
1308static blk_status_t crypt_convert(struct crypt_config *cc,
1309 struct convert_context *ctx)
1310{
1311 unsigned int tag_offset = 0;
1312 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1313 int r;
1314
1315 atomic_set(&ctx->cc_pending, 1);
1316
1317 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
1318
1319 crypt_alloc_req(cc, ctx);
1320 atomic_inc(&ctx->cc_pending);
1321
1322 if (crypt_integrity_aead(cc))
1323 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1324 else
1325 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1326
1327 switch (r) {
  /*
   * The request was queued by a crypto driver
   * but the driver request queue is full, let's wait.
   */
1332 case -EBUSY:
1333 wait_for_completion(&ctx->restart);
1334 reinit_completion(&ctx->restart);
   /* fall through */
  /*
   * The request is queued and processed asynchronously,
   * completion function kcryptd_async_done() will be called.
   */
1340 case -EINPROGRESS:
1341 ctx->r.req = NULL;
1342 ctx->cc_sector += sector_step;
1343 tag_offset++;
1344 continue;
  /*
   * The request was already processed (synchronously).
   */
1348 case 0:
1349 atomic_dec(&ctx->cc_pending);
1350 ctx->cc_sector += sector_step;
1351 tag_offset++;
1352 cond_resched();
1353 continue;
  /*
   * There was a data integrity error.
   */
1357 case -EBADMSG:
1358 atomic_dec(&ctx->cc_pending);
1359 return BLK_STS_PROTECTION;
  /*
   * There was an error while processing the request.
   */
1363 default:
1364 atomic_dec(&ctx->cc_pending);
1365 return BLK_STS_IOERR;
1366 }
1367 }
1368
1369 return 0;
1370}
1371
1372static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1373
/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fall back
 * to blocking allocations with a mutex.
 */
1391static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
1392{
1393 struct crypt_config *cc = io->cc;
1394 struct bio *clone;
1395 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1396 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
1397 unsigned i, len, remaining_size;
1398 struct page *page;
1399
1400retry:
1401 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1402 mutex_lock(&cc->bio_alloc_lock);
1403
1404 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
1405 if (!clone)
1406 goto out;
1407
1408 clone_init(io, clone);
1409
1410 remaining_size = size;
1411
1412 for (i = 0; i < nr_iovecs; i++) {
1413 page = mempool_alloc(cc->page_pool, gfp_mask);
1414 if (!page) {
1415 crypt_free_buffer_pages(cc, clone);
1416 bio_put(clone);
1417 gfp_mask |= __GFP_DIRECT_RECLAIM;
1418 goto retry;
1419 }
1420
1421 len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
1422
1423 bio_add_page(clone, page, len, 0);
1424
1425 remaining_size -= len;
1426 }
1427
1428
1429 if (dm_crypt_integrity_io_alloc(io, clone)) {
1430 crypt_free_buffer_pages(cc, clone);
1431 bio_put(clone);
1432 clone = NULL;
1433 }
1434out:
1435 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1436 mutex_unlock(&cc->bio_alloc_lock);
1437
1438 return clone;
1439}
1440
1441static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1442{
1443 unsigned int i;
1444 struct bio_vec *bv;
1445
1446 bio_for_each_segment_all(bv, clone, i) {
1447 BUG_ON(!bv->bv_page);
1448 mempool_free(bv->bv_page, cc->page_pool);
1449 }
1450}
1451
1452static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1453 struct bio *bio, sector_t sector)
1454{
1455 io->cc = cc;
1456 io->base_bio = bio;
1457 io->sector = sector;
1458 io->error = 0;
1459 io->ctx.r.req = NULL;
1460 io->integrity_metadata = NULL;
1461 io->integrity_metadata_from_pool = false;
1462 atomic_set(&io->io_pending, 0);
1463}
1464
1465static void crypt_inc_pending(struct dm_crypt_io *io)
1466{
1467 atomic_inc(&io->io_pending);
1468}
1469
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
1474static void crypt_dec_pending(struct dm_crypt_io *io)
1475{
1476 struct crypt_config *cc = io->cc;
1477 struct bio *base_bio = io->base_bio;
1478 blk_status_t error = io->error;
1479
1480 if (!atomic_dec_and_test(&io->io_pending))
1481 return;
1482
1483 if (io->ctx.r.req)
1484 crypt_free_req(cc, io->ctx.r.req, base_bio);
1485
1486 if (unlikely(io->integrity_metadata_from_pool))
1487 mempool_free(io->integrity_metadata, io->cc->tag_pool);
1488 else
1489 kfree(io->integrity_metadata);
1490
1491 base_bio->bi_status = error;
1492 bio_endio(base_bio);
1493}
1494
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */
1512static void crypt_endio(struct bio *clone)
1513{
1514 struct dm_crypt_io *io = clone->bi_private;
1515 struct crypt_config *cc = io->cc;
1516 unsigned rw = bio_data_dir(clone);
1517 blk_status_t error;
1518
 /*
  * free the processed pages
  */
1522 if (rw == WRITE)
1523 crypt_free_buffer_pages(cc, clone);
1524
1525 error = clone->bi_status;
1526 bio_put(clone);
1527
1528 if (rw == READ && !error) {
1529 kcryptd_queue_crypt(io);
1530 return;
1531 }
1532
1533 if (unlikely(error))
1534 io->error = error;
1535
1536 crypt_dec_pending(io);
1537}
1538
1539static void clone_init(struct dm_crypt_io *io, struct bio *clone)
1540{
1541 struct crypt_config *cc = io->cc;
1542
1543 clone->bi_private = io;
1544 clone->bi_end_io = crypt_endio;
1545 bio_set_dev(clone, cc->dev->bdev);
1546 clone->bi_opf = io->base_bio->bi_opf;
1547}
1548
1549static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1550{
1551 struct crypt_config *cc = io->cc;
1552 struct bio *clone;
1553
 /*
  * We need the original biovec array in order to decrypt
  * the whole bio data *afterwards* -- thanks to immutable
  * biovecs we don't need to worry about the block layer
  * modifying the biovec array; so leverage bio_clone_fast().
  */
1560 clone = bio_clone_fast(io->base_bio, gfp, cc->bs);
1561 if (!clone)
1562 return 1;
1563
1564 crypt_inc_pending(io);
1565
1566 clone_init(io, clone);
1567 clone->bi_iter.bi_sector = cc->start + io->sector;
1568
1569 if (dm_crypt_integrity_io_alloc(io, clone)) {
1570 crypt_dec_pending(io);
1571 bio_put(clone);
1572 return 1;
1573 }
1574
1575 generic_make_request(clone);
1576 return 0;
1577}
1578
1579static void kcryptd_io_read_work(struct work_struct *work)
1580{
1581 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1582
1583 crypt_inc_pending(io);
1584 if (kcryptd_io_read(io, GFP_NOIO))
1585 io->error = BLK_STS_RESOURCE;
1586 crypt_dec_pending(io);
1587}
1588
1589static void kcryptd_queue_read(struct dm_crypt_io *io)
1590{
1591 struct crypt_config *cc = io->cc;
1592
1593 INIT_WORK(&io->work, kcryptd_io_read_work);
1594 queue_work(cc->io_queue, &io->work);
1595}
1596
1597static void kcryptd_io_write(struct dm_crypt_io *io)
1598{
1599 struct bio *clone = io->ctx.bio_out;
1600
1601 generic_make_request(clone);
1602}
1603
1604#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
1605
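/*
 * dmcrypt_write() runs in a per-device kernel thread. Encrypted writes are
 * queued on an rb-tree keyed by sector (see kcryptd_crypt_write_io_submit)
 * so that, even though encryption completes out of order, the bios are
 * submitted in sector order under a block plug.
 */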
1606static int dmcrypt_write(void *data)
1607{
1608 struct crypt_config *cc = data;
1609 struct dm_crypt_io *io;
1610
1611 while (1) {
1612 struct rb_root write_tree;
1613 struct blk_plug plug;
1614
1615 DECLARE_WAITQUEUE(wait, current);
1616
1617 spin_lock_irq(&cc->write_thread_wait.lock);
1618continue_locked:
1619
1620 if (!RB_EMPTY_ROOT(&cc->write_tree))
1621 goto pop_from_list;
1622
1623 set_current_state(TASK_INTERRUPTIBLE);
1624 __add_wait_queue(&cc->write_thread_wait, &wait);
1625
1626 spin_unlock_irq(&cc->write_thread_wait.lock);
1627
1628 if (unlikely(kthread_should_stop())) {
1629 set_current_state(TASK_RUNNING);
1630 remove_wait_queue(&cc->write_thread_wait, &wait);
1631 break;
1632 }
1633
1634 schedule();
1635
1636 set_current_state(TASK_RUNNING);
1637 spin_lock_irq(&cc->write_thread_wait.lock);
1638 __remove_wait_queue(&cc->write_thread_wait, &wait);
1639 goto continue_locked;
1640
1641pop_from_list:
1642 write_tree = cc->write_tree;
1643 cc->write_tree = RB_ROOT;
1644 spin_unlock_irq(&cc->write_thread_wait.lock);
1645
1646 BUG_ON(rb_parent(write_tree.rb_node));
1647
  /*
   * Note: we cannot walk the tree here with rb_next because
   * the structures may be freed when kcryptd_io_write is called.
   */
1652 blk_start_plug(&plug);
1653 do {
1654 io = crypt_io_from_node(rb_first(&write_tree));
1655 rb_erase(&io->rb_node, &write_tree);
1656 kcryptd_io_write(io);
1657 } while (!RB_EMPTY_ROOT(&write_tree));
1658 blk_finish_plug(&plug);
1659 }
1660 return 0;
1661}
1662
1663static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1664{
1665 struct bio *clone = io->ctx.bio_out;
1666 struct crypt_config *cc = io->cc;
1667 unsigned long flags;
1668 sector_t sector;
1669 struct rb_node **rbp, *parent;
1670
1671 if (unlikely(io->error)) {
1672 crypt_free_buffer_pages(cc, clone);
1673 bio_put(clone);
1674 crypt_dec_pending(io);
1675 return;
1676 }
1677
1678
1679 BUG_ON(io->ctx.iter_out.bi_size);
1680
1681 clone->bi_iter.bi_sector = cc->start + io->sector;
1682
1683 if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
1684 generic_make_request(clone);
1685 return;
1686 }
1687
1688 spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
1689 rbp = &cc->write_tree.rb_node;
1690 parent = NULL;
1691 sector = io->sector;
1692 while (*rbp) {
1693 parent = *rbp;
1694 if (sector < crypt_io_from_node(parent)->sector)
1695 rbp = &(*rbp)->rb_left;
1696 else
1697 rbp = &(*rbp)->rb_right;
1698 }
1699 rb_link_node(&io->rb_node, parent, rbp);
1700 rb_insert_color(&io->rb_node, &cc->write_tree);
1701
1702 wake_up_locked(&cc->write_thread_wait);
1703 spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
1704}
1705
1706static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1707{
1708 struct crypt_config *cc = io->cc;
1709 struct bio *clone;
1710 int crypt_finished;
1711 sector_t sector = io->sector;
1712 blk_status_t r;
1713
 /*
  * Prevent io from disappearing while processing it.
  */
1717 crypt_inc_pending(io);
1718 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
1719
1720 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1721 if (unlikely(!clone)) {
1722 io->error = BLK_STS_IOERR;
1723 goto dec;
1724 }
1725
1726 io->ctx.bio_out = clone;
1727 io->ctx.iter_out = clone->bi_iter;
1728
1729 sector += bio_sectors(clone);
1730
1731 crypt_inc_pending(io);
1732 r = crypt_convert(cc, &io->ctx);
1733 if (r)
1734 io->error = r;
1735 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
1736
1737
1738 if (crypt_finished) {
1739 kcryptd_crypt_write_io_submit(io, 0);
1740 io->sector = sector;
1741 }
1742
1743dec:
1744 crypt_dec_pending(io);
1745}
1746
1747static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
1748{
1749 crypt_dec_pending(io);
1750}
1751
1752static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
1753{
1754 struct crypt_config *cc = io->cc;
1755 blk_status_t r;
1756
1757 crypt_inc_pending(io);
1758
1759 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
1760 io->sector);
1761
1762 r = crypt_convert(cc, &io->ctx);
1763 if (r)
1764 io->error = r;
1765
1766 if (atomic_dec_and_test(&io->ctx.cc_pending))
1767 kcryptd_crypt_read_done(io);
1768
1769 crypt_dec_pending(io);
1770}
1771
1772static void kcryptd_async_done(struct crypto_async_request *async_req,
1773 int error)
1774{
1775 struct dm_crypt_request *dmreq = async_req->data;
1776 struct convert_context *ctx = dmreq->ctx;
1777 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1778 struct crypt_config *cc = io->cc;
1779
 /*
  * A request from the crypto driver backlog is going to be processed now,
  * finish the completion and continue in crypt_convert().
  * (The callback will be called a second time for this request.)
  */
1785 if (error == -EINPROGRESS) {
1786 complete(&ctx->restart);
1787 return;
1788 }
1789
1790 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
1791 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
1792
1793 if (error == -EBADMSG) {
1794 DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
1795 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
1796 io->error = BLK_STS_PROTECTION;
1797 } else if (error < 0)
1798 io->error = BLK_STS_IOERR;
1799
1800 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
1801
1802 if (!atomic_dec_and_test(&ctx->cc_pending))
1803 return;
1804
1805 if (bio_data_dir(io->base_bio) == READ)
1806 kcryptd_crypt_read_done(io);
1807 else
1808 kcryptd_crypt_write_io_submit(io, 1);
1809}
1810
1811static void kcryptd_crypt(struct work_struct *work)
1812{
1813 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1814
1815 if (bio_data_dir(io->base_bio) == READ)
1816 kcryptd_crypt_read_convert(io);
1817 else
1818 kcryptd_crypt_write_convert(io);
1819}
1820
1821static void kcryptd_queue_crypt(struct dm_crypt_io *io)
1822{
1823 struct crypt_config *cc = io->cc;
1824
1825 INIT_WORK(&io->work, kcryptd_crypt);
1826 queue_work(cc->crypt_queue, &io->work);
1827}
1828
1829static void crypt_free_tfms_aead(struct crypt_config *cc)
1830{
1831 if (!cc->cipher_tfm.tfms_aead)
1832 return;
1833
1834 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1835 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
1836 cc->cipher_tfm.tfms_aead[0] = NULL;
1837 }
1838
1839 kfree(cc->cipher_tfm.tfms_aead);
1840 cc->cipher_tfm.tfms_aead = NULL;
1841}
1842
1843static void crypt_free_tfms_skcipher(struct crypt_config *cc)
1844{
1845 unsigned i;
1846
1847 if (!cc->cipher_tfm.tfms)
1848 return;
1849
1850 for (i = 0; i < cc->tfms_count; i++)
1851 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
1852 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
1853 cc->cipher_tfm.tfms[i] = NULL;
1854 }
1855
1856 kfree(cc->cipher_tfm.tfms);
1857 cc->cipher_tfm.tfms = NULL;
1858}
1859
1860static void crypt_free_tfms(struct crypt_config *cc)
1861{
1862 if (crypt_integrity_aead(cc))
1863 crypt_free_tfms_aead(cc);
1864 else
1865 crypt_free_tfms_skcipher(cc);
1866}
1867
1868static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
1869{
1870 unsigned i;
1871 int err;
1872
1873 cc->cipher_tfm.tfms = kzalloc(cc->tfms_count *
1874 sizeof(struct crypto_skcipher *), GFP_KERNEL);
1875 if (!cc->cipher_tfm.tfms)
1876 return -ENOMEM;
1877
1878 for (i = 0; i < cc->tfms_count; i++) {
1879 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
1880 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
1881 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
1882 crypt_free_tfms(cc);
1883 return err;
1884 }
1885 }
1886
1887 return 0;
1888}
1889
1890static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
1891{
1892 int err;
1893
1894 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
1895 if (!cc->cipher_tfm.tfms)
1896 return -ENOMEM;
1897
1898 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
1899 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
1900 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
1901 crypt_free_tfms(cc);
1902 return err;
1903 }
1904
1905 return 0;
1906}
1907
1908static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
1909{
1910 if (crypt_integrity_aead(cc))
1911 return crypt_alloc_tfms_aead(cc, ciphermode);
1912 else
1913 return crypt_alloc_tfms_skcipher(cc, ciphermode);
1914}
1915
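/*
 * For multi-key mappings (tfms_count > 1) the supplied key is an array of
 * equally sized subkeys, one per cipher tfm; key_extra_size accounts for
 * trailing material such as the lmk IV seed or tcw IV/whitening bytes.
 */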
1916static unsigned crypt_subkey_size(struct crypt_config *cc)
1917{
1918 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
1919}
1920
1921static unsigned crypt_authenckey_size(struct crypt_config *cc)
1922{
1923 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
1924}
1925
/*
 * For AEADs composed as authenc(hmac(sha256),xts(aes)) the crypto API
 * expects the key packed as an rtattr carrying the encryption key length,
 * followed by the authentication key and then the encryption key.
 * This function converts cc->key into that format.
 */
1931static void crypt_copy_authenckey(char *p, const void *key,
1932 unsigned enckeylen, unsigned authkeylen)
1933{
1934 struct crypto_authenc_key_param *param;
1935 struct rtattr *rta;
1936
1937 rta = (struct rtattr *)p;
1938 param = RTA_DATA(rta);
1939 param->enckeylen = cpu_to_be32(enckeylen);
1940 rta->rta_len = RTA_LENGTH(sizeof(*param));
1941 rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1942 p += RTA_SPACE(sizeof(*param));
1943 memcpy(p, key + enckeylen, authkeylen);
1944 p += authkeylen;
1945 memcpy(p, key, enckeylen);
1946}
1947
1948static int crypt_setkey(struct crypt_config *cc)
1949{
1950 unsigned subkey_size;
1951 int err = 0, i, r;
1952
1953
1954 subkey_size = crypt_subkey_size(cc);
1955
1956 if (crypt_integrity_hmac(cc)) {
1957 if (subkey_size < cc->key_mac_size)
1958 return -EINVAL;
1959
1960 crypt_copy_authenckey(cc->authenc_key, cc->key,
1961 subkey_size - cc->key_mac_size,
1962 cc->key_mac_size);
1963 }
1964
1965 for (i = 0; i < cc->tfms_count; i++) {
1966 if (crypt_integrity_hmac(cc))
1967 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1968 cc->authenc_key, crypt_authenckey_size(cc));
1969 else if (crypt_integrity_aead(cc))
1970 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
1971 cc->key + (i * subkey_size),
1972 subkey_size);
1973 else
1974 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
1975 cc->key + (i * subkey_size),
1976 subkey_size);
1977 if (r)
1978 err = r;
1979 }
1980
1981 if (crypt_integrity_hmac(cc))
1982 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
1983
1984 return err;
1985}
1986
1987#ifdef CONFIG_KEYS
1988
1989static bool contains_whitespace(const char *str)
1990{
1991 while (*str)
1992 if (isspace(*str++))
1993 return true;
1994 return false;
1995}
1996
1997static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
1998{
1999 char *new_key_string, *key_desc;
2000 int ret;
2001 struct key *key;
2002 const struct user_key_payload *ukp;
2003
 /*
  * Reject key_string with whitespace. dm core currently lacks code for
  * proper whitespace escaping in arguments on the DM_TABLE_STATUS path.
  */
2008 if (contains_whitespace(key_string)) {
2009 DMERR("whitespace chars not allowed in key string");
2010 return -EINVAL;
2011 }
2012
2013
2014 key_desc = strpbrk(key_string, ":");
2015 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2016 return -EINVAL;
2017
2018 if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
2019 strncmp(key_string, "user:", key_desc - key_string + 1))
2020 return -EINVAL;
2021
2022 new_key_string = kstrdup(key_string, GFP_KERNEL);
2023 if (!new_key_string)
2024 return -ENOMEM;
2025
2026 key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
2027 key_desc + 1, NULL);
2028 if (IS_ERR(key)) {
2029 kzfree(new_key_string);
2030 return PTR_ERR(key);
2031 }
2032
2033 down_read(&key->sem);
2034
2035 ukp = user_key_payload_locked(key);
2036 if (!ukp) {
2037 up_read(&key->sem);
2038 key_put(key);
2039 kzfree(new_key_string);
2040 return -EKEYREVOKED;
2041 }
2042
2043 if (cc->key_size != ukp->datalen) {
2044 up_read(&key->sem);
2045 key_put(key);
2046 kzfree(new_key_string);
2047 return -EINVAL;
2048 }
2049
2050 memcpy(cc->key, ukp->data, cc->key_size);
2051
2052 up_read(&key->sem);
2053 key_put(key);
2054
2055
2056 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2057
2058 ret = crypt_setkey(cc);
2059
2060 if (!ret) {
2061 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2062 kzfree(cc->key_string);
2063 cc->key_string = new_key_string;
2064 } else
2065 kzfree(new_key_string);
2066
2067 return ret;
2068}
2069
2070static int get_key_size(char **key_string)
2071{
2072 char *colon, dummy;
2073 int ret;
2074
2075 if (*key_string[0] != ':')
2076 return strlen(*key_string) >> 1;
2077
2078
2079 colon = strpbrk(*key_string + 1, ":");
2080 if (!colon)
2081 return -EINVAL;
2082
2083 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2084 return -EINVAL;
2085
2086 *key_string = colon;
2087
2088
2089
2090 return ret;
2091}
2092
2093#else
2094
2095static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2096{
2097 return -EINVAL;
2098}
2099
2100static int get_key_size(char **key_string)
2101{
2102 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2103}
2104
2105#endif
2106
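/*
 * The key argument is either "-" (no key), a hex-encoded key, or a reference
 * to a key in the kernel keyring of the form
 *   :<key_size>:<key_type>:<key_description>
 * with <key_type> being "logon" or "user", for example (illustrative):
 *   :32:logon:my_prefix:my_key
 */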
2107static int crypt_set_key(struct crypt_config *cc, char *key)
2108{
2109 int r = -EINVAL;
2110 int key_string_len = strlen(key);
2111
2112
2113 if (!cc->key_size && strcmp(key, "-"))
2114 goto out;
2115
2116
2117 if (key[0] == ':') {
2118 r = crypt_set_keyring_key(cc, key + 1);
2119 goto out;
2120 }
2121
2122
2123 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2124
2125
2126 kzfree(cc->key_string);
2127 cc->key_string = NULL;
2128
2129
2130 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2131 goto out;
2132
2133 r = crypt_setkey(cc);
2134 if (!r)
2135 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2136
2137out:
2138
2139 memset(key, '0', key_string_len);
2140
2141 return r;
2142}
2143
2144static int crypt_wipe_key(struct crypt_config *cc)
2145{
2146 int r;
2147
2148 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2149 get_random_bytes(&cc->key, cc->key_size);
2150 kzfree(cc->key_string);
2151 cc->key_string = NULL;
2152 r = crypt_setkey(cc);
2153 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2154
2155 return r;
2156}
2157
2158static void crypt_dtr(struct dm_target *ti)
2159{
2160 struct crypt_config *cc = ti->private;
2161
2162 ti->private = NULL;
2163
2164 if (!cc)
2165 return;
2166
2167 if (cc->write_thread)
2168 kthread_stop(cc->write_thread);
2169
2170 if (cc->io_queue)
2171 destroy_workqueue(cc->io_queue);
2172 if (cc->crypt_queue)
2173 destroy_workqueue(cc->crypt_queue);
2174
2175 crypt_free_tfms(cc);
2176
2177 if (cc->bs)
2178 bioset_free(cc->bs);
2179
2180 mempool_destroy(cc->page_pool);
2181 mempool_destroy(cc->req_pool);
2182 mempool_destroy(cc->tag_pool);
2183
2184 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2185 cc->iv_gen_ops->dtr(cc);
2186
2187 if (cc->dev)
2188 dm_put_device(ti, cc->dev);
2189
2190 kzfree(cc->cipher);
2191 kzfree(cc->cipher_string);
2192 kzfree(cc->key_string);
2193 kzfree(cc->cipher_auth);
2194 kzfree(cc->authenc_key);
2195
2196 mutex_destroy(&cc->bio_alloc_lock);
2197
2198
2199 kzfree(cc);
2200}
2201
2202static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2203{
2204 struct crypt_config *cc = ti->private;
2205
2206 if (crypt_integrity_aead(cc))
2207 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2208 else
2209 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2210
2211 if (cc->iv_size)
2212
2213 cc->iv_size = max(cc->iv_size,
2214 (unsigned int)(sizeof(u64) / sizeof(u8)));
2215 else if (ivmode) {
2216 DMWARN("Selected cipher does not support IVs");
2217 ivmode = NULL;
2218 }
2219
2220
2221 if (ivmode == NULL)
2222 cc->iv_gen_ops = NULL;
2223 else if (strcmp(ivmode, "plain") == 0)
2224 cc->iv_gen_ops = &crypt_iv_plain_ops;
2225 else if (strcmp(ivmode, "plain64") == 0)
2226 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2227 else if (strcmp(ivmode, "plain64be") == 0)
2228 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2229 else if (strcmp(ivmode, "essiv") == 0)
2230 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2231 else if (strcmp(ivmode, "benbi") == 0)
2232 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2233 else if (strcmp(ivmode, "null") == 0)
2234 cc->iv_gen_ops = &crypt_iv_null_ops;
2235 else if (strcmp(ivmode, "lmk") == 0) {
2236 cc->iv_gen_ops = &crypt_iv_lmk_ops;
  /*
   * Version 2 and 3 is recognised according
   * to length of provided multi-key string.
   * If present (version 3), last key is used as IV seed.
   * All keys (including IV seed) are always the same size.
   */
2243 if (cc->key_size % cc->key_parts) {
2244 cc->key_parts++;
2245 cc->key_extra_size = cc->key_size / cc->key_parts;
2246 }
2247 } else if (strcmp(ivmode, "tcw") == 0) {
2248 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2249 cc->key_parts += 2;
2250 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2251 } else if (strcmp(ivmode, "random") == 0) {
2252 cc->iv_gen_ops = &crypt_iv_random_ops;
2253
2254 cc->integrity_iv_size = cc->iv_size;
2255 } else {
2256 ti->error = "Invalid IV mode";
2257 return -EINVAL;
2258 }
2259
2260 return 0;
2261}
2262

/*
 * Workaround to parse cipher algorithm from crypto API spec.
 * The cc->cipher is currently used only in ESSIV.
 * This should preferably be done by crypto API calls (once available).
 */
2268static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
2269{
2270 const char *alg_name = NULL;
2271 char *start, *end;
2272
2273 if (crypt_integrity_aead(cc)) {
2274 alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
2275 if (!alg_name)
2276 return -EINVAL;
2277 if (crypt_integrity_hmac(cc)) {
2278 alg_name = strchr(alg_name, ',');
2279 if (!alg_name)
2280 return -EINVAL;
2281 }
2282 alg_name++;
2283 } else {
2284 alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
2285 if (!alg_name)
2286 return -EINVAL;
2287 }
2288
2289 start = strchr(alg_name, '(');
2290 end = strchr(alg_name, ')');
2291
2292 if (!start && !end) {
2293 cc->cipher = kstrdup(alg_name, GFP_KERNEL);
2294 return cc->cipher ? 0 : -ENOMEM;
2295 }
2296
2297 if (!start || !end || ++start >= end)
2298 return -EINVAL;
2299
2300 cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
2301 if (!cc->cipher)
2302 return -ENOMEM;
2303
2304 strncpy(cc->cipher, start, end - start);
2305
2306 return 0;
2307}

/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should be probably done by crypto-api calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}
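
/*
 * Example (illustrative only): for "authenc(hmac(sha256),xts(aes))" the text
 * between '(' and ',' is "hmac(sha256)", so cc->key_mac_size becomes the
 * SHA-256 digest size (32 bytes) and the authenc key buffer is sized to hold
 * both the HMAC key and the encryption key.
 */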

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api;
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];
	cipher_api = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	} else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	ret = crypt_ctr_blkdev_cipher(cc);
	if (ret < 0) {
		ti->error = "Cannot allocate cipher string";
		return -ENOMEM;
	}

	return 0;
}
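
/*
 * Example (illustrative only): the table cipher string "capi:xts(aes)-plain64"
 * parses into cipher_api "xts(aes)" and ivmode "plain64", while
 * "capi:authenc(hmac(sha256),cbc(aes))-essiv:sha256" yields cipher_api
 * "authenc(hmac(sha256),cbc(aes))", ivmode "essiv" and ivopts "sha256".
 */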

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	*ivopts = strsep(&tmp, "-");
	*ivmode = strsep(&*ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
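
/*
 * Example (illustrative only): the legacy cipher string "aes-xts-plain64"
 * becomes the crypto API spec "xts(aes)" with ivmode "plain64";
 * "aes:64-cbc-lmk" additionally requests 64 keys/tfms, and a bare "aes"
 * falls back to "cbc(aes)" with the "plain" IV for compatibility.
 */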

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}
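
/*
 * Example (illustrative only): a table ending in
 * "... 3 allow_discards sector_size:4096 iv_large_sectors" passes three
 * feature arguments, and "... 1 integrity:32:aead" reserves a 32-byte
 * per-sector tag and enables authenticated (AEAD) mode.
 */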

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	ret = -ENOMEM;

	/* ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (!cc->tag_pool) {
			ti->error = "Cannot allocate integrity tags mempool";
			ret = -ENOMEM;
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus());
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	init_waitqueue_head(&cc->write_thread_wait);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
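
/*
 * Example mapping (illustrative values only):
 *   dmsetup create cryptdev --table \
 *     "0 2097152 crypt aes-xts-plain64 <64-hex-digit key> 0 /dev/sdb 0 1 allow_discards"
 * maps a 1 GiB region of /dev/sdb starting at sector 0, with IV offset 0 and
 * one optional feature argument enabling discard pass-through.
 */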

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		limits->logical_block_size = cc->sector_size;
		limits->physical_block_size = cc->sector_size;
		blk_limits_io_min(limits, cc->sector_size);
	}
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 18, 1},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");