#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <keys/user-type.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

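/*
 * context holding the current state of a multi-part conversion
 */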
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	struct bvec_iter iter_in;
	struct bvec_iter iter_out;
	u64 cc_sector;
	atomic_t cc_pending;
	union {
		struct skcipher_request *req;
		struct aead_request *req_aead;
	} r;
};

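/*
 * per bio private data
 */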
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	u8 *integrity_metadata;
	bool integrity_metadata_from_pool;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	blk_status_t error;
	sector_t sector;

	struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in[4];
	struct scatterlist sg_out[4];
	u64 iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_shash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
	struct crypto_shash *crc32_tfm;
	u8 *iv_seed;
	u8 *whitening;
};

struct iv_eboiv_private {
	struct crypto_cipher *tfm;
};

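/*
 * Per-instance state flags and per-cipher mode flags.
 */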
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,
	CRYPT_IV_LARGE_SECTORS,
};

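/*
 * The fields in here must be read only after initialization.
 */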
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	struct percpu_counter n_allocated_pages;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	spinlock_t write_thread_lock;
	struct task_struct *write_thread;
	struct rb_root write_tree;

	char *cipher;
	char *cipher_string;
	char *cipher_auth;
	char *key_string;

	const struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
		struct iv_tcw_private tcw;
		struct iv_eboiv_private eboiv;
	} iv_gen_private;
	u64 iv_offset;
	unsigned int iv_size;
	unsigned short int sector_size;
	unsigned char sector_shift;

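	/* ESSIV: struct crypto_cipher *essiv_tfm */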
	void *iv_private;
	union {
		struct crypto_skcipher **tfms;
		struct crypto_aead **tfms_aead;
	} cipher_tfm;
	unsigned tfms_count;
	unsigned long cipher_flags;

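	/*
	 * Layout of each crypto request:
	 *
	 *   struct skcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */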
	unsigned int dmreq_start;

	unsigned int per_bio_data_size;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	unsigned int key_extra_size;
	unsigned int key_mac_size;

	unsigned int integrity_tag_size;
	unsigned int integrity_iv_size;
	unsigned int on_disk_tag_size;

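	/*
	 * pool for per bio private data, crypto requests,
	 * encryption requests/buffer pages and integrity tags
	 */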
	unsigned tag_pool_max_sectors;
	mempool_t tag_pool;
	mempool_t req_pool;
	mempool_t page_pool;

	struct bio_set bs;
	struct mutex bio_alloc_lock;

	u8 *authenc_key;
	u8 key[0];
};

#define MIN_IOS 64
#define MAX_TAG_SIZE 480
#define POOL_ENTRY_SIZE 512

static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg);

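/*
 * Use this to access cipher attributes that are independent of the key.
 */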
static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms[0];
}

static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
{
	return cc->cipher_tfm.tfms_aead[0];
}

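/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64be: the initial vector is the 64-bit big-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system.
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *
 * tcw: Compatible implementation of the block chaining mode used
 *      by the TrueCrypt device encryption system (prior to version 4.1).
 *
 * eboiv: Encrypted byte-offset IV (used in Bitlocker in CBC mode)
 *        The IV is encrypted little-endian byte-offset (with the same key
 *        and cipher as the volume).
 */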
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
				  struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	*(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int err;

	desc->tfm = essiv->hash_tfm;

	err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
	shash_desc_zero(desc);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_shash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
						struct dm_target *ti,
						const u8 *salt,
						unsigned int saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_shash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_shash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	hash_tfm = crypto_alloc_shash(opts, 0, 0);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
				       crypto_shash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_shash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64));

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for LMK";
		return -EINVAL;
	}

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
	struct md5_state md5state;
	__le32 buf[4];
	int i, r;

	desc->tfm = lmk->hash_tfm;

	r = crypto_shash_init(desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	r = crypto_shash_update(desc, data + 16, 16 * 31);
	if (r)
		return r;

	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	r = crypto_shash_export(desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);

	if (!r)
		crypto_xor(dst + sg->offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	kzfree(tcw->iv_seed);
	tcw->iv_seed = NULL;
	kzfree(tcw->whitening);
	tcw->whitening = NULL;

	if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
		crypto_free_shash(tcw->crc32_tfm);
	tcw->crc32_tfm = NULL;
}

static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
		ti->error = "Unsupported sector size for TCW";
		return -EINVAL;
	}

	if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
		ti->error = "Wrong key size for TCW";
		return -EINVAL;
	}

	tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tcw->crc32_tfm)) {
		ti->error = "Error initializing CRC32 in TCW";
		return PTR_ERR(tcw->crc32_tfm);
	}

	tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
	tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
	if (!tcw->iv_seed || !tcw->whitening) {
		crypt_iv_tcw_dtr(cc);
		ti->error = "Error allocating seed storage in TCW";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_tcw_init(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;

	memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
	memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
	       TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;

	memset(tcw->iv_seed, 0, cc->iv_size);
	memset(tcw->whitening, 0, TCW_WHITENING_SIZE);

	return 0;
}

static int crypt_iv_tcw_whitening(struct crypt_config *cc,
				  struct dm_crypt_request *dmreq,
				  u8 *data)
{
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[TCW_WHITENING_SIZE];
	SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
	int i, r;

	crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
	crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);

	desc->tfm = tcw->crc32_tfm;
	for (i = 0; i < 4; i++) {
		r = crypto_shash_init(desc);
		if (r)
			goto out;
		r = crypto_shash_update(desc, &buf[i * 4], 4);
		if (r)
			goto out;
		r = crypto_shash_final(desc, &buf[i * 4]);
		if (r)
			goto out;
	}
	crypto_xor(&buf[0], &buf[12], 4);
	crypto_xor(&buf[4], &buf[8], 4);

	for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
		crypto_xor(data + i * 8, buf, 8);
out:
	memzero_explicit(buf, sizeof(buf));
	return r;
}

static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
		sg = crypt_get_sg_data(cc, dmreq->sg_in);
		src = kmap_atomic(sg_page(sg));
		r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
		kunmap_atomic(src);
	}

	crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
	if (cc->iv_size > 8)
		crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
			       cc->iv_size - 8);

	return r;
}

static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	struct scatterlist *sg;
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
		return 0;

	sg = crypt_get_sg_data(cc, dmreq->sg_out);
	dst = kmap_atomic(sg_page(sg));
	r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
	kunmap_atomic(dst);

	return r;
}

static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
			       struct dm_crypt_request *dmreq)
{
	get_random_bytes(iv, cc->iv_size);
	return 0;
}

static void crypt_iv_eboiv_dtr(struct crypt_config *cc)
{
	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;

	crypto_free_cipher(eboiv->tfm);
	eboiv->tfm = NULL;
}

static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
	struct crypto_cipher *tfm;

	tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm for EBOIV";
		return PTR_ERR(tfm);
	}

	if (crypto_cipher_blocksize(tfm) != cc->iv_size) {
		ti->error = "Block size of EBOIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(tfm);
		return -EINVAL;
	}

	eboiv->tfm = tfm;
	return 0;
}

static int crypt_iv_eboiv_init(struct crypt_config *cc)
{
	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
	int err;

	err = crypto_cipher_setkey(eboiv->tfm, cc->key, cc->key_size);
	if (err)
		return err;

	return 0;
}

static int crypt_iv_eboiv_wipe(struct crypt_config *cc)
{
	return crypt_iv_eboiv_init(cc);
}

static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
	crypto_cipher_encrypt_one(eboiv->tfm, iv, iv);

	return 0;
}

static const struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static const struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
	.generator = crypt_iv_plain64be_gen
};

static const struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static const struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static const struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static const struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static const struct crypt_iv_operations crypt_iv_tcw_ops = {
	.ctr = crypt_iv_tcw_ctr,
	.dtr = crypt_iv_tcw_dtr,
	.init = crypt_iv_tcw_init,
	.wipe = crypt_iv_tcw_wipe,
	.generator = crypt_iv_tcw_gen,
	.post = crypt_iv_tcw_post
};

static struct crypt_iv_operations crypt_iv_random_ops = {
	.generator = crypt_iv_random_gen
};

static struct crypt_iv_operations crypt_iv_eboiv_ops = {
	.ctr = crypt_iv_eboiv_ctr,
	.dtr = crypt_iv_eboiv_dtr,
	.init = crypt_iv_eboiv_init,
	.wipe = crypt_iv_eboiv_wipe,
	.generator = crypt_iv_eboiv_gen
};

static bool crypt_integrity_aead(struct crypt_config *cc)
{
	return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
}

static bool crypt_integrity_hmac(struct crypt_config *cc)
{
	return crypt_integrity_aead(cc) && cc->key_mac_size;
}

static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
					     struct scatterlist *sg)
{
	if (unlikely(crypt_integrity_aead(cc)))
		return &sg[2];

	return sg;
}

static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
{
	struct bio_integrity_payload *bip;
	unsigned int tag_len;
	int ret;

	if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
		return 0;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);

	bip->bip_iter.bi_size = tag_len;
	bip->bip_iter.bi_sector = io->cc->start + io->sector;

	ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
				     tag_len, offset_in_page(io->integrity_metadata));
	if (unlikely(ret != tag_len))
		return -ENOMEM;

	return 0;
}

static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
	struct mapped_device *md = dm_table_get_md(ti->table);

	if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
		ti->error = "Integrity profile not supported.";
		return -EINVAL;
	}

	if (bi->tag_size != cc->on_disk_tag_size ||
	    bi->tuple_size != cc->on_disk_tag_size) {
		ti->error = "Integrity profile tag size mismatch.";
		return -EINVAL;
	}
	if (1 << bi->interval_exp != cc->sector_size) {
		ti->error = "Integrity profile sector size mismatch.";
		return -EINVAL;
	}

	if (crypt_integrity_aead(cc)) {
		cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
		       cc->integrity_tag_size, cc->integrity_iv_size);

		if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
			ti->error = "Integrity AEAD auth tag size is not supported.";
			return -EINVAL;
		}
	} else if (cc->integrity_iv_size)
		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
		       cc->integrity_iv_size);

	if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
		ti->error = "Not enough space for integrity tag in the profile.";
		return -EINVAL;
	}

	return 0;
#else
	ti->error = "Integrity profile not supported.";
	return -EINVAL;
#endif
}

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	if (bio_in)
		ctx->iter_in = bio_in->bi_iter;
	if (bio_out)
		ctx->iter_out = bio_out->bi_iter;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     void *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
{
	return (void *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	if (crypt_integrity_aead(cc))
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
	else
		return (u8 *)ALIGN((unsigned long)(dmreq + 1),
			crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}

static u8 *org_iv_of_dmreq(struct crypt_config *cc,
			   struct dm_crypt_request *dmreq)
{
	return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}

static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
				   struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
	return (__le64 *) ptr;
}

static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
				      struct dm_crypt_request *dmreq)
{
	u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
		  cc->iv_size + sizeof(uint64_t);
	return (unsigned int *)ptr;
}

static void *tag_from_dmreq(struct crypt_config *cc,
			    struct dm_crypt_request *dmreq)
{
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);

	return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
		cc->on_disk_tag_size];
}

static void *iv_tag_from_dmreq(struct crypt_config *cc,
			       struct dm_crypt_request *dmreq)
{
	return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
}

static int crypt_convert_block_aead(struct crypt_config *cc,
				    struct convert_context *ctx,
				    struct aead_request *req,
				    unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv, *tag;
	__le64 *sector;
	int r = 0;

	BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag = tag_from_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

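	/*
	 * AEAD request:
	 *  |----- AAD -------|------ DATA -------|-- AUTH TAG --|
	 *  | (authenticated) | (auth+encryption) |              |
	 *  | sector_LE |  IV |  sector in/out    |  tag in/out  |
	 */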
	sg_init_table(dmreq->sg_in, 4);
	sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
	sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);

	sg_init_table(dmreq->sg_out, 4);
	sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
	sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
	sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
	sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);

	if (cc->iv_gen_ops) {
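		/* For READs use IV stored in integrity metadata */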
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;

			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->iv_size);
		}

		memcpy(iv, org_iv, cc->iv_size);
	}

	aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
	if (bio_data_dir(ctx->bio_in) == WRITE) {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size, iv);
		r = crypto_aead_encrypt(req);
		if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
			memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
			       cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
	} else {
		aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
				       cc->sector_size + cc->integrity_tag_size, iv);
		r = crypto_aead_decrypt(req);
	}

	if (r == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*sector));
	}

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static int crypt_convert_block_skcipher(struct crypt_config *cc,
					struct convert_context *ctx,
					struct skcipher_request *req,
					unsigned int tag_offset)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct scatterlist *sg_in, *sg_out;
	struct dm_crypt_request *dmreq;
	u8 *iv, *org_iv, *tag_iv;
	__le64 *sector;
	int r = 0;

	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
		return -EIO;

	dmreq = dmreq_of_req(cc, req);
	dmreq->iv_sector = ctx->cc_sector;
	if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
		dmreq->iv_sector >>= cc->sector_shift;
	dmreq->ctx = ctx;

	*org_tag_of_dmreq(cc, dmreq) = tag_offset;

	iv = iv_of_dmreq(cc, dmreq);
	org_iv = org_iv_of_dmreq(cc, dmreq);
	tag_iv = iv_tag_from_dmreq(cc, dmreq);

	sector = org_sector_of_dmreq(cc, dmreq);
	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);

	sg_in = &dmreq->sg_in[0];
	sg_out = &dmreq->sg_out[0];

	sg_init_table(sg_in, 1);
	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);

	sg_init_table(sg_out, 1);
	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);

	if (cc->iv_gen_ops) {
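		/* For READs use IV stored in integrity metadata */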
		if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
			memcpy(org_iv, tag_iv, cc->integrity_iv_size);
		} else {
			r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
			if (r < 0)
				return r;

			if (cc->integrity_iv_size)
				memcpy(tag_iv, org_iv, cc->integrity_iv_size);
		}

		memcpy(iv, org_iv, cc->iv_size);
	}

	skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req_skcipher(struct crypt_config *cc,
				     struct convert_context *ctx)
{
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!ctx->r.req)
		ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);

	skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);

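	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */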
	skcipher_request_set_callback(ctx->r.req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}

static void crypt_alloc_req_aead(struct crypt_config *cc,
				 struct convert_context *ctx)
{
	if (!ctx->r.req_aead)
		ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);

	aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);

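	/*
	 * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
	 * requests if driver request queue is full.
	 */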
	aead_request_set_callback(ctx->r.req_aead,
	    CRYPTO_TFM_REQ_MAY_BACKLOG,
	    kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (crypt_integrity_aead(cc))
		crypt_alloc_req_aead(cc, ctx);
	else
		crypt_alloc_req_skcipher(cc, ctx);
}

static void crypt_free_req_skcipher(struct crypt_config *cc,
				    struct skcipher_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct skcipher_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req_aead(struct crypt_config *cc,
				struct aead_request *req, struct bio *base_bio)
{
	struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);

	if ((struct aead_request *)(io + 1) != req)
		mempool_free(req, &cc->req_pool);
}

static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
{
	if (crypt_integrity_aead(cc))
		crypt_free_req_aead(cc, req, base_bio);
	else
		crypt_free_req_skcipher(cc, req, base_bio);
}

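/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */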
static blk_status_t crypt_convert(struct crypt_config *cc,
				  struct convert_context *ctx)
{
	unsigned int tag_offset = 0;
	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {

		crypt_alloc_req(cc, ctx);
		atomic_inc(&ctx->cc_pending);

		if (crypt_integrity_aead(cc))
			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
		else
			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);

		switch (r) {
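		/*
		 * The request was queued by a crypto driver
		 * but the driver request queue is full, let's wait.
		 */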
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			reinit_completion(&ctx->restart);
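			/* fall through */
		/*
		 * The request is queued and processed asynchronously,
		 * completion function kcryptd_async_done() will be called.
		 */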
		case -EINPROGRESS:
			ctx->r.req = NULL;
			ctx->cc_sector += sector_step;
			tag_offset++;
			continue;
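		/*
		 * The request was already processed (synchronously).
		 */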
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector += sector_step;
			tag_offset++;
			cond_resched();
			continue;
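		/*
		 * There was a data integrity error.
		 */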
		case -EBADMSG:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_PROTECTION;
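		/*
		 * There was an error while processing the request.
		 */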
		default:
			atomic_dec(&ctx->cc_pending);
			return BLK_STS_IOERR;
		}
	}

	return 0;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);

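/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations (but only because
 * max_segment_size is being constrained to PAGE_SIZE).
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * mempool of 256 pages, two processes, each wanting 256, pages allocate from
 * the mempool concurrently, it may deadlock in a situation where both processes
 * have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order to not degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first but on failure we fallback
 * to blocking allocations with a mutex.
 */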
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;

retry:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
	if (!clone)
		goto out;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(&cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_DIRECT_RECLAIM;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bio_add_page(clone, page, len, 0);

		remaining_size -= len;
	}

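	/* Allocate space for integrity tags */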
	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		clone = NULL;
	}
out:
	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, clone, iter_all) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, &cc->page_pool);
	}
}

static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
			  struct bio *bio, sector_t sector)
{
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->ctx.r.req = NULL;
	io->integrity_metadata = NULL;
	io->integrity_metadata_from_pool = false;
	atomic_set(&io->io_pending, 0);
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}

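/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */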
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	blk_status_t error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	if (io->ctx.r.req)
		crypt_free_req(cc, io->ctx.r.req, base_bio);

	if (unlikely(io->integrity_metadata_from_pool))
		mempool_free(io->integrity_metadata, &io->cc->tag_pool);
	else
		kfree(io->integrity_metadata);

	base_bio->bi_status = error;
	bio_endio(base_bio);
}

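/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */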
static void crypt_endio(struct bio *clone)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);
	blk_status_t error;

	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	error = clone->bi_status;
	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	bio_set_dev(clone, cc->dev->bdev);
	clone->bi_opf = io->base_bio->bi_opf;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

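	/*
	 * We need the original biovec array in order to decrypt
	 * the whole bio data *afterwards* -- thanks to immutable
	 * biovecs we don't need to worry about the block layer
	 * modifying the biovec array; so leverage bio_clone_fast().
	 */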
	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (dm_crypt_integrity_io_alloc(io, clone)) {
		crypt_dec_pending(io);
		bio_put(clone);
		return 1;
	}

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_read_work(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	crypt_inc_pending(io);
	if (kcryptd_io_read(io, GFP_NOIO))
		io->error = BLK_STS_RESOURCE;
	crypt_dec_pending(io);
}

static void kcryptd_queue_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io_read_work);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;

	generic_make_request(clone);
}

#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)

static int dmcrypt_write(void *data)
{
	struct crypt_config *cc = data;
	struct dm_crypt_io *io;

	while (1) {
		struct rb_root write_tree;
		struct blk_plug plug;

		spin_lock_irq(&cc->write_thread_lock);
continue_locked:

		if (!RB_EMPTY_ROOT(&cc->write_tree))
			goto pop_from_list;

		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock_irq(&cc->write_thread_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		set_current_state(TASK_RUNNING);
		spin_lock_irq(&cc->write_thread_lock);
		goto continue_locked;

pop_from_list:
		write_tree = cc->write_tree;
		cc->write_tree = RB_ROOT;
		spin_unlock_irq(&cc->write_thread_lock);

		BUG_ON(rb_parent(write_tree.rb_node));

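		/*
		 * The tree is keyed by bio sector; pull the whole tree at
		 * once and submit the bios in sector order under one plug.
		 */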
		blk_start_plug(&plug);
		do {
			io = crypt_io_from_node(rb_first(&write_tree));
			rb_erase(&io->rb_node, &write_tree);
			kcryptd_io_write(io);
		} while (!RB_EMPTY_ROOT(&write_tree));
		blk_finish_plug(&plug);
	}
	return 0;
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;
	unsigned long flags;
	sector_t sector;
	struct rb_node **rbp, *parent;

	if (unlikely(io->error)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}

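	/* crypt_convert should have filled the clone bio */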
	BUG_ON(io->ctx.iter_out.bi_size);

	clone->bi_iter.bi_sector = cc->start + io->sector;

	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
		generic_make_request(clone);
		return;
	}

	spin_lock_irqsave(&cc->write_thread_lock, flags);
	if (RB_EMPTY_ROOT(&cc->write_tree))
		wake_up_process(cc->write_thread);
	rbp = &cc->write_tree.rb_node;
	parent = NULL;
	sector = io->sector;
	while (*rbp) {
		parent = *rbp;
		if (sector < crypt_io_from_node(parent)->sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}
	rb_link_node(&io->rb_node, parent, rbp);
	rb_insert_color(&io->rb_node, &cc->write_tree);
	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	int crypt_finished;
	sector_t sector = io->sector;
	blk_status_t r;

	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
	if (unlikely(!clone)) {
		io->error = BLK_STS_IOERR;
		goto dec;
	}

	io->ctx.bio_out = clone;
	io->ctx.iter_out = clone->bi_iter;

	sector += bio_sectors(clone);

	crypt_inc_pending(io);
	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = r;
	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

	if (crypt_finished) {
		kcryptd_crypt_write_io_submit(io, 0);
		io->sector = sector;
	}

dec:
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	blk_status_t r;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r)
		io->error = r;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

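	/*
	 * A request from crypto driver backlog is going to be processed now,
	 * finish the completion and continue in crypt_convert().
	 * (Callback will be called for the second time for this request.)
	 */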
	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);

	if (error == -EBADMSG) {
		char b[BDEVNAME_SIZE];
		DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
			    (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
		io->error = BLK_STS_PROTECTION;
	} else if (error < 0)
		io->error = BLK_STS_IOERR;

	crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

static void crypt_free_tfms_aead(struct crypt_config *cc)
{
	if (!cc->cipher_tfm.tfms_aead)
		return;

	if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
		cc->cipher_tfm.tfms_aead[0] = NULL;
	}

	kfree(cc->cipher_tfm.tfms_aead);
	cc->cipher_tfm.tfms_aead = NULL;
}

static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->cipher_tfm.tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
			crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
			cc->cipher_tfm.tfms[i] = NULL;
		}

	kfree(cc->cipher_tfm.tfms);
	cc->cipher_tfm.tfms = NULL;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	if (crypt_integrity_aead(cc))
		crypt_free_tfms_aead(cc);
	else
		crypt_free_tfms_skcipher(cc);
}

static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
				      sizeof(struct crypto_skcipher *),
				      GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->cipher_tfm.tfms[i])) {
			err = PTR_ERR(cc->cipher_tfm.tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

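	/*
	 * dm-crypt performance can vary greatly depending on which crypto
	 * algorithm implementation is used.  Help people debug performance
	 * problems by logging the ->cra_driver_name.
	 */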
	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
	       crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
{
	int err;

	cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
	if (!cc->cipher_tfm.tfms)
		return -ENOMEM;

	cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
	if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
		err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
		crypt_free_tfms(cc);
		return err;
	}

	DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
	       crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
	return 0;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	if (crypt_integrity_aead(cc))
		return crypt_alloc_tfms_aead(cc, ciphermode);
	else
		return crypt_alloc_tfms_skcipher(cc, ciphermode);
}

static unsigned crypt_subkey_size(struct crypt_config *cc)
{
	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}

static unsigned crypt_authenckey_size(struct crypt_config *cc)
{
	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}

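/*
 * Pack the key into the rtattr format expected by the authenc()
 * template: { rtattr with enckeylen | authentication key | encryption key }.
 */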
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;

	rta = (struct rtattr *)p;
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	p += RTA_SPACE(sizeof(*param));
	memcpy(p, key + enckeylen, authkeylen);
	p += authkeylen;
	memcpy(p, key, enckeylen);
}

static int crypt_setkey(struct crypt_config *cc)
{
	unsigned subkey_size;
	int err = 0, i, r;

	subkey_size = crypt_subkey_size(cc);

	if (crypt_integrity_hmac(cc)) {
		if (subkey_size < cc->key_mac_size)
			return -EINVAL;

		crypt_copy_authenckey(cc->authenc_key, cc->key,
				      subkey_size - cc->key_mac_size,
				      cc->key_mac_size);
	}

	for (i = 0; i < cc->tfms_count; i++) {
		if (crypt_integrity_hmac(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
				cc->authenc_key, crypt_authenckey_size(cc));
		else if (crypt_integrity_aead(cc))
			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
					       cc->key + (i * subkey_size),
					       subkey_size);
		else
			r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
						   cc->key + (i * subkey_size),
						   subkey_size);
		if (r)
			err = r;
	}

	if (crypt_integrity_hmac(cc))
		memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));

	return err;
}

#ifdef CONFIG_KEYS

static bool contains_whitespace(const char *str)
{
	while (*str)
		if (isspace(*str++))
			return true;
	return false;
}

static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
	char *new_key_string, *key_desc;
	int ret;
	struct key *key;
	const struct user_key_payload *ukp;

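	/*
	 * Reject key_string with whitespace. dm core currently lacks code for
	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
	 */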
2094 if (contains_whitespace(key_string)) {
2095 DMERR("whitespace chars not allowed in key string");
2096 return -EINVAL;
2097 }
2098
2099
2100 key_desc = strpbrk(key_string, ":");
2101 if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
2102 return -EINVAL;
2103
2104 if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
2105 strncmp(key_string, "user:", key_desc - key_string + 1))
2106 return -EINVAL;
2107
2108 new_key_string = kstrdup(key_string, GFP_KERNEL);
2109 if (!new_key_string)
2110 return -ENOMEM;
2111
2112 key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
2113 key_desc + 1, NULL);
2114 if (IS_ERR(key)) {
2115 kzfree(new_key_string);
2116 return PTR_ERR(key);
2117 }
2118
2119 down_read(&key->sem);
2120
2121 ukp = user_key_payload_locked(key);
2122 if (!ukp) {
2123 up_read(&key->sem);
2124 key_put(key);
2125 kzfree(new_key_string);
2126 return -EKEYREVOKED;
2127 }
2128
2129 if (cc->key_size != ukp->datalen) {
2130 up_read(&key->sem);
2131 key_put(key);
2132 kzfree(new_key_string);
2133 return -EINVAL;
2134 }
2135
2136 memcpy(cc->key, ukp->data, cc->key_size);
2137
2138 up_read(&key->sem);
2139 key_put(key);
2140
2141
2142 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2143
2144 ret = crypt_setkey(cc);
2145
2146 if (!ret) {
2147 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2148 kzfree(cc->key_string);
2149 cc->key_string = new_key_string;
2150 } else
2151 kzfree(new_key_string);
2152
2153 return ret;
2154}
2155
2156static int get_key_size(char **key_string)
2157{
2158 char *colon, dummy;
2159 int ret;
2160
2161 if (*key_string[0] != ':')
2162 return strlen(*key_string) >> 1;
2163
2164
2165 colon = strpbrk(*key_string + 1, ":");
2166 if (!colon)
2167 return -EINVAL;
2168
2169 if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
2170 return -EINVAL;
2171
2172 *key_string = colon;
2173
2174
2175
2176 return ret;
2177}
2178
2179#else
2180
2181static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2182{
2183 return -EINVAL;
2184}
2185
2186static int get_key_size(char **key_string)
2187{
2188 return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
2189}
2190
2191#endif
2192
2193static int crypt_set_key(struct crypt_config *cc, char *key)
2194{
2195 int r = -EINVAL;
2196 int key_string_len = strlen(key);
2197
2198
2199 if (!cc->key_size && strcmp(key, "-"))
2200 goto out;
2201
2202
2203 if (key[0] == ':') {
2204 r = crypt_set_keyring_key(cc, key + 1);
2205 goto out;
2206 }
2207
2208
2209 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2210
2211
2212 kzfree(cc->key_string);
2213 cc->key_string = NULL;
2214
2215
2216 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2217 goto out;
2218
2219 r = crypt_setkey(cc);
2220 if (!r)
2221 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2222
2223out:
2224
2225 memset(key, '0', key_string_len);
2226
2227 return r;
2228}
2229
2230static int crypt_wipe_key(struct crypt_config *cc)
2231{
2232 int r;
2233
2234 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2235 get_random_bytes(&cc->key, cc->key_size);
2236
2237
2238 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2239 r = cc->iv_gen_ops->wipe(cc);
2240 if (r)
2241 return r;
2242 }
2243
2244 kzfree(cc->key_string);
2245 cc->key_string = NULL;
2246 r = crypt_setkey(cc);
2247 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2248
2249 return r;
2250}
2251
2252static void crypt_calculate_pages_per_client(void)
2253{
2254 unsigned long pages = (totalram_pages() - totalhigh_pages()) * DM_CRYPT_MEMORY_PERCENT / 100;
2255
2256 if (!dm_crypt_clients_n)
2257 return;
2258
2259 pages /= dm_crypt_clients_n;
2260 if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2261 pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2262 dm_crypt_pages_per_client = pages;
2263}
2264
2265static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2266{
2267 struct crypt_config *cc = pool_data;
2268 struct page *page;
2269
2270 if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2271 likely(gfp_mask & __GFP_NORETRY))
2272 return NULL;
2273
2274 page = alloc_page(gfp_mask);
2275 if (likely(page != NULL))
2276 percpu_counter_add(&cc->n_allocated_pages, 1);
2277
2278 return page;
2279}
2280
2281static void crypt_page_free(void *page, void *pool_data)
2282{
2283 struct crypt_config *cc = pool_data;
2284
2285 __free_page(page);
2286 percpu_counter_sub(&cc->n_allocated_pages, 1);
2287}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->write_thread)
		kthread_stop(cc->write_thread);

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	crypt_free_tfms(cc);

	bioset_exit(&cc->bs);

	mempool_exit(&cc->page_pool);
	mempool_exit(&cc->req_pool);
	mempool_exit(&cc->tag_pool);

	WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
	percpu_counter_destroy(&cc->n_allocated_pages);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);
	kzfree(cc->key_string);
	kzfree(cc->cipher_auth);
	kzfree(cc->authenc_key);

	mutex_destroy(&cc->bio_alloc_lock);

	/* Must zero key material before freeing */
	kzfree(cc);

	spin_lock(&dm_crypt_clients_lock);
	WARN_ON(!dm_crypt_clients_n);
	dm_crypt_clients_n--;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);
}

static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
{
	struct crypt_config *cc = ti->private;

	if (crypt_integrity_aead(cc))
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "plain64be") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64be_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "eboiv") == 0)
		cc->iv_gen_ops = &crypt_iv_eboiv_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Version 2 and 3 is recognised according
		 * to length of provided multi-key string.
		 * If present (version 3), last key is used as IV seed.
		 * All keys (including IV seed) are always the same size.
		 */
		if (cc->key_size % cc->key_parts) {
			cc->key_parts++;
			cc->key_extra_size = cc->key_size / cc->key_parts;
		}
	} else if (strcmp(ivmode, "tcw") == 0) {
		cc->iv_gen_ops = &crypt_iv_tcw_ops;
		cc->key_parts += 2; /* IV + whitening */
		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
	} else if (strcmp(ivmode, "random") == 0) {
		cc->iv_gen_ops = &crypt_iv_random_ops;
		/* Need storage space in integrity fields. */
		cc->integrity_iv_size = cc->iv_size;
	} else {
		ti->error = "Invalid IV mode";
		return -EINVAL;
	}

	return 0;
}
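
/*
 * Illustrative cipher strings exercising the IV modes above (examples only):
 *   "aes-cbc-essiv:sha256" - ESSIV, sector number encrypted with hashed key
 *   "aes-xts-plain64"      - IV is the little-endian 64-bit sector number
 *   "aes:64-cbc-lmk"       - loop-AES compatible 64-key mode
 */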

/*
 * Workaround to parse cipher algorithm from crypto API spec.
 * The cc->cipher is currently used only in ESSIV.
 * This should be probably done by crypto-api calls (once available...)
 */
static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
{
	const char *alg_name = NULL;
	char *start, *end;

	if (crypt_integrity_aead(cc)) {
		alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
		if (!alg_name)
			return -EINVAL;
		if (crypt_integrity_hmac(cc)) {
			alg_name = strchr(alg_name, ',');
			if (!alg_name)
				return -EINVAL;
		}
		alg_name++;
	} else {
		alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
		if (!alg_name)
			return -EINVAL;
	}

	start = strchr(alg_name, '(');
	end = strchr(alg_name, ')');

	if (!start && !end) {
		cc->cipher = kstrdup(alg_name, GFP_KERNEL);
		return cc->cipher ? 0 : -ENOMEM;
	}

	if (!start || !end || ++start >= end)
		return -EINVAL;

	cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
	if (!cc->cipher)
		return -ENOMEM;

	strncpy(cc->cipher, start, end - start);

	return 0;
}
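
/*
 * For example (illustrative): "xts(aes)" yields cc->cipher = "aes", as does
 * "authenc(hmac(sha256),xts(aes))"; a spec without parentheses is copied
 * verbatim.
 */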

/*
 * Workaround to parse HMAC algorithm from AEAD crypto API spec.
 * The HMAC is needed to calculate tag size (HMAC digest size).
 * This should be probably done by crypto-api calls (once available...)
 */
static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
{
	char *start, *end, *mac_alg = NULL;
	struct crypto_ahash *mac;

	if (!strstarts(cipher_api, "authenc("))
		return 0;

	start = strchr(cipher_api, '(');
	end = strchr(cipher_api, ',');
	if (!start || !end || ++start > end)
		return -EINVAL;

	mac_alg = kzalloc(end - start + 1, GFP_KERNEL);
	if (!mac_alg)
		return -ENOMEM;
	strncpy(mac_alg, start, end - start);

	mac = crypto_alloc_ahash(mac_alg, 0, 0);
	kfree(mac_alg);

	if (IS_ERR(mac))
		return PTR_ERR(mac);

	cc->key_mac_size = crypto_ahash_digestsize(mac);
	crypto_free_ahash(mac);

	cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
	if (!cc->authenc_key)
		return -ENOMEM;

	return 0;
}
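
/*
 * E.g. for "authenc(hmac(sha256),cbc(aes))" (illustrative), mac_alg is
 * "hmac(sha256)" and cc->key_mac_size becomes 32, the sha256 digest size.
 */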

static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher_api;
	int ret = -EINVAL;

	cc->tfms_count = 1;

	/*
	 * New format (capi: prefix)
	 * capi:cipher_api_spec-iv:ivopts
	 */
	tmp = &cipher_in[strlen("capi:")];

	/* Separate IV options if present, it can contain another '-' in hash name */
	*ivopts = strrchr(tmp, ':');
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	/* Parse IV mode */
	*ivmode = strrchr(tmp, '-');
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	/* The rest is crypto API spec */
	cipher_api = tmp;

	if (*ivmode && !strcmp(*ivmode, "lmk"))
		cc->tfms_count = 64;

	cc->key_parts = cc->tfms_count;

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		return ret;
	}

	/* Alloc AEAD, can be used only in new format. */
	if (crypt_integrity_aead(cc)) {
		ret = crypt_ctr_auth_cipher(cc, cipher_api);
		if (ret < 0) {
			ti->error = "Invalid AEAD cipher spec";
			return -ENOMEM;
		}
		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
	} else
		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));

	ret = crypt_ctr_blkdev_cipher(cc);
	if (ret < 0) {
		ti->error = "Cannot allocate cipher string";
		return -ENOMEM;
	}

	return 0;
}
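
/*
 * Illustrative "capi:" specifications parsed above (examples only):
 *   "capi:xts(aes)-plain64"
 *   "capi:cbc(aes)-essiv:sha256"
 *   "capi:authenc(hmac(sha256),xts(aes))-random"
 */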

static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key,
				char **ivmode, char **ivopts)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;

	if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	*ivmode = strsep(&tmp, ":");
	*ivopts = tmp;

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) {
		chainmode = "cbc";
		*ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !*ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		kfree(cipher_api);
		return ret;
	}
	kfree(cipher_api);

	return 0;
bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
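
/*
 * Illustrative legacy specifications parsed above (examples only):
 *   "aes"                      - expands to cbc(aes) with a plain IV
 *   "aes-xts-plain64"          - becomes crypto API spec "xts(aes)"
 *   "twofish-cbc-essiv:sha256"
 */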

static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *ivmode = NULL, *ivopts = NULL;
	int ret;

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string) {
		ti->error = "Cannot allocate cipher strings";
		return -ENOMEM;
	}

	if (strstarts(cipher_in, "capi:"))
		ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts);
	else
		ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts);
	if (ret)
		return ret;

	/* Initialize IV */
	ret = crypt_ctr_ivmode(ti, ivmode);
	if (ret < 0)
		return ret;

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		return ret;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			return ret;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			return ret;
		}
	}

	/* wipe the kernel key payload copy */
	if (cc->key_string)
		memset(cc->key, 0, cc->key_size * sizeof(u8));

	return ret;
}

static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
	};
	unsigned int opt_params, val;
	const char *opt_string, *sval;
	char dummy;
	int ret;

	/* Optional parameters */
	as.argc = argc;
	as.argv = argv;

	ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (ret)
		return ret;

	while (opt_params--) {
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			ti->error = "Not enough feature arguments";
			return -EINVAL;
		}

		if (!strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;

		else if (!strcasecmp(opt_string, "same_cpu_crypt"))
			set_bit(DM_CRYPT_SAME_CPU, &cc->flags);

		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
			if (val == 0 || val > MAX_TAG_SIZE) {
				ti->error = "Invalid integrity arguments";
				return -EINVAL;
			}
			cc->on_disk_tag_size = val;
			sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
			if (!strcasecmp(sval, "aead")) {
				set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
			} else if (strcasecmp(sval, "none")) {
				ti->error = "Unknown integrity profile";
				return -EINVAL;
			}

			cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
			if (!cc->cipher_auth)
				return -ENOMEM;
		} else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
			if (cc->sector_size < (1 << SECTOR_SHIFT) ||
			    cc->sector_size > 4096 ||
			    (cc->sector_size & (cc->sector_size - 1))) {
				ti->error = "Invalid feature value for sector_size";
				return -EINVAL;
			}
			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
				ti->error = "Device size is not multiple of sector_size feature";
				return -EINVAL;
			}
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
		}
	}

	return 0;
}
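
/*
 * Illustrative feature argument block as it would appear at the end of a
 * table line (example only):
 *   "3 allow_discards sector_size:4096 iv_large_sectors"
 */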

/*
 * Construct an encryption mapping:
 * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	const char *devname = dm_table_device_name(ti->table);
	int key_size;
	unsigned int align_mask;
	unsigned long long tmpll;
	int ret;
	size_t iv_size_padding, additional_req_size;
	char dummy;

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = get_key_size(&argv[1]);
	if (key_size < 0) {
		ti->error = "Cannot parse key size";
		return -EINVAL;
	}

	cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;
	cc->sector_size = (1 << SECTOR_SHIFT);
	cc->sector_shift = 0;

	ti->private = cc;

	spin_lock(&dm_crypt_clients_lock);
	dm_crypt_clients_n++;
	crypt_calculate_pages_per_client();
	spin_unlock(&dm_crypt_clients_lock);

	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
	if (ret < 0)
		goto bad;

	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		if (ret)
			goto bad;
	}

	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	if (crypt_integrity_aead(cc)) {
		cc->dmreq_start = sizeof(struct aead_request);
		cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
		align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
	} else {
		cc->dmreq_start = sizeof(struct skcipher_request);
		cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
		align_mask = crypto_skcipher_alignmask(any_tfm(cc));
	}
	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));

	if (align_mask < CRYPTO_MINALIGN) {
		/* Allocate the padding exactly */
		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
				& align_mask;
	} else {
		/*
		 * If the cipher requires greater alignment than kmalloc
		 * alignment, we don't know the exact position of the
		 * initialization vector. We must assume worst case.
		 */
		iv_size_padding = align_mask;
	}

	/*  ...| IV + padding | original IV | original sec. number | bio tag offset | */
	additional_req_size = sizeof(struct dm_crypt_request) +
		iv_size_padding + cc->iv_size +
		cc->iv_size +
		sizeof(uint64_t) +
		sizeof(unsigned int);

	ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
	if (ret) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->per_bio_data_size = ti->per_io_data_size =
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);

	ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
	if (ret) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
	if (ret) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	mutex_init(&cc->bio_alloc_lock);

	ret = -EINVAL;
	if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
	    (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
		ret = crypt_integrity_ctr(cc, ti);
		if (ret)
			goto bad;

		cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
		if (!cc->tag_pool_max_sectors)
			cc->tag_pool_max_sectors = 1;

		ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
		if (ret) {
			ti->error = "Cannot allocate integrity tags mempool";
			goto bad;
		}

		cc->tag_pool_max_sectors <<= cc->sector_shift;
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io/%s",
				       WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
				       1, devname);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
						  1, devname);
	else
		cc->crypt_queue = alloc_workqueue("kcryptd/%s",
						  WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
						  num_online_cpus(), devname);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	spin_lock_init(&cc->write_thread_lock);
	cc->write_tree = RB_ROOT;

	cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
	if (IS_ERR(cc->write_thread)) {
		ret = PTR_ERR(cc->write_thread);
		cc->write_thread = NULL;
		ti->error = "Couldn't spawn write thread";
		goto bad;
	}
	wake_up_process(cc->write_thread);

	ti->num_flush_bios = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
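
/*
 * Illustrative table line for this constructor (example only; key shortened):
 *   0 417792 crypt aes-xts-plain64 e8cf... 0 /dev/sdb 0
 * loaded e.g. with: dmsetup create cryptodev --table "<line above>"
 */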

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
				dm_target_offset(ti, bio->bi_iter.bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	/*
	 * Check if bio is too large, split as needed.
	 */
	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));

	/*
	 * Ensure that bio is a multiple of internal sector encryption size
	 * and is aligned to this size as defined in IO hints.
	 */
	if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
		return DM_MAPIO_KILL;

	if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, cc->per_bio_data_size);
	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));

	if (cc->on_disk_tag_size) {
		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);

		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
				GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			if (bio_sectors(bio) > cc->tag_pool_max_sectors)
				dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
			io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
			io->integrity_metadata_from_pool = true;
		}
	}

	if (crypt_integrity_aead(cc))
		io->ctx.r.req_aead = (struct aead_request *)(io + 1);
	else
		io->ctx.r.req = (struct skcipher_request *)(io + 1);

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_read(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;
	int num_feature_args = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if (cc->key_string)
				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
			else
				for (i = 0; i < cc->key_size; i++)
					DMEMIT("%02x", cc->key[i]);
		} else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		num_feature_args += !!ti->num_discard_bios;
		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (ti->num_discard_bios)
				DMEMIT(" allow_discards");
			if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
				DMEMIT(" same_cpu_crypt");
			if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
				DMEMIT(" submit_from_crypt_cpus");
			if (cc->on_disk_tag_size)
				DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
			if (cc->sector_size != (1 << SECTOR_SHIFT))
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
		}

		break;
	}
}
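
/*
 * Illustrative STATUSTYPE_TABLE output produced above (key shortened):
 *   "aes-xts-plain64 e8cf... 0 /dev/sdb 0 1 allow_discards"
 */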

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
			 char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	int key_size, ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			/* The key size may not be changed. */
			key_size = get_key_size(&argv[2]);
			if (key_size < 0 || cc->key_size != key_size) {
				memset(argv[2], '0', strlen(argv[2]));
				return -EINVAL;
			}

			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			/* wipe the kernel key payload copy */
			if (cc->key_string)
				memset(cc->key, 0, cc->key_size * sizeof(u8));
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe"))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
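
/*
 * Example (illustrative) of wiping the key of a suspended device:
 *   dmsetup suspend cryptodev
 *   dmsetup message cryptodev 0 key wipe
 */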

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct crypt_config *cc = ti->private;

	/*
	 * Unfortunate constraint that is required to avoid the potential
	 * for exceeding underlying device's max_segments limits -- due to
	 * crypt_alloc_buffer() possibly allocating pages for the encryption
	 * bio that are not as physically contiguous as the original bio.
	 */
	limits->max_segment_size = PAGE_SIZE;

	limits->logical_block_size =
		max_t(unsigned short, limits->logical_block_size, cc->sector_size);
	limits->physical_block_size =
		max_t(unsigned, limits->physical_block_size, cc->sector_size);
	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 19, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.iterate_devices = crypt_iterate_devices,
	.io_hints = crypt_io_hints,
};

static int __init dm_crypt_init(void)
{
	int r;

	r = dm_register_target(&crypt_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Jana Saout <jana@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");