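/*
 * dm-crypt: device-mapper target providing transparent encryption and
 * decryption of block device data, built on the kernel crypto API.
 *
 * This file is released under the GPL.
 */
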
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"

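/*
 * context holding the current state of a multi-part conversion
 */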
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};
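
/*
 * per bio private data
 */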
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};
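
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */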
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

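/*
 * Duplicated per-CPU state for cipher.
 */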
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};
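
/*
 * The fields in here must be read only after initialization;
 * changing state should be in crypt_cpu.
 */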
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

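	/*
	 * pool for per bio private data, crypto requests and
	 * encryption request/buffer pages
	 */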
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

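	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */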
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

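	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */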
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

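/*
 * Use this to access cipher attributes that are the same for each CPU.
 */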
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

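/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block in the sector
 *       is IV modified by encrypted data; the rest of each sector must
 *       be decrypted and the decrypted data applied, and if it can be
 *       deciphered correctly, the whole sector is "shifted" by one block.
 */
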
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen.
	 */
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

	/* No seed in LMK version 2 */
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

	/* LMK seed is on the position of LMK_KEYS + 1 key */
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

	/* Sector is cropped to 56 bits here */
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

	/* No MD5 padding here */
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src, KM_USER0);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

	/* Tweak the first block of plaintext sector */
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst, KM_USER0);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
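
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */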
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}
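
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */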
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially allocated bio; the caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}
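
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */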
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}
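
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */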
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
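
/*
 * Decode key from its hex representation
 */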
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
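
/*
 * Encode key into its hex representation
 */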
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size),
						     subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);

	/* The key size may not be changed. */
	if (cc->key_size != (key_string_len >> 1))
		goto out;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
	/* Hex key string not needed after here, so wipe it. */
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Reject full crypto api cipher specifications */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see the comments at the IV code above. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 of LMK are recognised according
		 * to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
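
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 *
 * For illustration, a dmsetup table line might look roughly like the
 * following (the device path, length and hex key are hypothetical
 * placeholders, not values this driver prescribes):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64-digit-hex-key> 0 /dev/sdb 0
 */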
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_requests = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT |
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT |
					  WQ_CPU_INTENSIVE |
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->discard_zeroes_data_unsupported = 1;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO buffers are not freed
	 */
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_requests)
			DMEMIT(" 1 allow_discards");

		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
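
/*
 * Message interface
 *	key set <key>
 *	key wipe
 */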
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 11, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");