/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
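
/*
 * context holding the current state of a multi-part conversion
 */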
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t cc_sector;
	atomic_t cc_pending;
};
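
/*
 * per bio private data
 */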
struct dm_crypt_io {
	struct crypt_config *cc;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t io_pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};
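
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at write / read time
 */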
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
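
/*
 * Duplicated per-CPU state for cipher.
 */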
struct crypt_cpu {
	struct ablkcipher_request *req;
};
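
/*
 * The fields in here must be read only after initialization.
 */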
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * this_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;

	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher **tfms;
	unsigned tfms_count;
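
	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */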
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}
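
/*
 * Use this to access cipher attributes that are the same for each CPU.
 */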
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return cc->tfms[0];
}
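
/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the
 *        sector number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * lmk:  Compatible implementation of the block chaining mode used
 *       by the Loop-AES block device encryption system
 *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *       It operates on full 512 byte sectors and uses CBC
 *       with an IV derived from the sector number, the data and
 *       optionally an extra IV seed.
 *       This means that after decryption the first block
 *       in sector is also special - it is (partially) encrypted.
 */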
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}
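
/* Initialise ESSIV - compute salt but no local memory allocations */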
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
				   crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int r, err = 0;

	memset(essiv->salt, 0, salt_size);

	essiv_tfm = cc->iv_private;
	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
	if (r)
		err = r;

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;
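
	/* Setup the essiv_tfm with the given salt */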
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	essiv_tfm = cc->iv_private;

	if (essiv_tfm)
		crypto_free_cipher(essiv_tfm);

	cc->iv_private = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}
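
	/* Allocate hash algorithm */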
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error allocating salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
				    crypto_hash_digestsize(hash_tfm));
	if (IS_ERR(essiv_tfm)) {
		crypt_iv_essiv_dtr(cc);
		return PTR_ERR(essiv_tfm);
	}
	cc->iv_private = essiv_tfm;

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = cc->iv_private;

	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);
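
	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */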

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
435
436static void crypt_iv_benbi_dtr(struct crypt_config *cc)
437{
438}
439
440static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
441 struct dm_crypt_request *dmreq)
442{
443 __be64 val;
444
445 memset(iv, 0, cc->iv_size - sizeof(u64));
446
447 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
448 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
449
450 return 0;
451}
452
453static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
454 struct dm_crypt_request *dmreq)
455{
456 memset(iv, 0, cc->iv_size);
457
458 return 0;
459}
460
461static void crypt_iv_lmk_dtr(struct crypt_config *cc)
462{
463 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
464
465 if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
466 crypto_free_shash(lmk->hash_tfm);
467 lmk->hash_tfm = NULL;
468
469 kzfree(lmk->seed);
470 lmk->seed = NULL;
471}
472
473static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
474 const char *opts)
475{
476 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
477
478 lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
479 if (IS_ERR(lmk->hash_tfm)) {
480 ti->error = "Error initializing LMK hash";
481 return PTR_ERR(lmk->hash_tfm);
482 }
483
484
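
	/* No seed in LMK version 2 */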
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error allocating seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;
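
	/* LMK seed is on the position of LMK_KEYS + 1 key */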
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}
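
	/* Sector is always 512B, block size 16, add data of blocks 1-31 */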
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;
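
	/* Sector is cropped to 56 bits here */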
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;
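
	/* No MD5 padding here */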
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in));
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out));
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
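
	/* Tweak the first block of plaintext sector */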
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr       = crypt_iv_lmk_ctr,
	.dtr       = crypt_iv_lmk_dtr,
	.init      = crypt_iv_lmk_init,
	.wipe      = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post      = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->cc_sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
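
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */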
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->cc_pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->cc_pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->cc_sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->cc_pending);
			ctx->cc_sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->cc_pending);
			return r;
		}
	}

	return 0;
}
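
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */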
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}
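
		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio.  The caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */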
		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, clone, i) {
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
					  struct bio *bio, sector_t sector)
{
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->cc = cc;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->io_pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->io_pending);
}
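
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */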
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->io_pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}
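
/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU global for all dm-crypt instances.
 * They should not depend on each other and do not block.
 */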
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->cc;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;
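
	/*
	 * free the processed pages
	 */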
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
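
	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */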
	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
	if (!clone)
		return 1;

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_sector = cc->start + io->sector;

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->cc;

	if (unlikely(io->error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		crypt_dec_pending(io);
		return;
	}
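
	/* crypt_convert should have filled the clone bio */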
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;
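
	/*
	 * Prevent io from disappearing until this function completes.
	 */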
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
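
	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */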
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);

		r = crypt_convert(cc, &io->ctx);
		if (r < 0)
			io->error = -EIO;

		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->cc, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);
	if (r < 0)
		io->error = -EIO;

	if (atomic_dec_and_test(&io->ctx.cc_pending))
		kcryptd_crypt_read_done(io);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->cc;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	if (error < 0)
		io->error = -EIO;

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->cc_pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io);
	else
		kcryptd_crypt_write_io_submit(io, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->cc;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}
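
/*
 * Decode key from its hex representation
 */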
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		if (kstrtou8(buffer, 16, &key[i]))
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

static void crypt_free_tfms(struct crypt_config *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;

	for (i = 0; i < cc->tfms_count; i++)
		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
			crypto_free_ablkcipher(cc->tfms[i]);
			cc->tfms[i] = NULL;
		}

	kfree(cc->tfms);
	cc->tfms = NULL;
}

static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
	unsigned i;
	int err;

	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
			   GFP_KERNEL);
	if (!cc->tfms)
		return -ENOMEM;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cc->tfms[i])) {
			err = PTR_ERR(cc->tfms[i]);
			crypt_free_tfms(cc);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int err = 0, i, r;

	for (i = 0; i < cc->tfms_count; i++) {
		r = crypto_ablkcipher_setkey(cc->tfms[i],
					     cc->key + (i * subkey_size),
					     subkey_size);
		if (r)
			err = r;
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	int r = -EINVAL;
	int key_string_len = strlen(key);
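
	/* The key size may not be changed. */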
	if (cc->key_size != (key_string_len >> 1))
		goto out;
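
	/* Hyphen (which gives a key_size of zero) means there is no key. */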
	if (!cc->key_size && strcmp(key, "-"))
		goto out;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	r = crypt_setkey_allcpus(cc);

out:
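	/* Hex key string not needed after here, so wipe it. */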
	memset(key, '0', key_string_len);

	return r;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
		}

	crypt_free_tfms(cc);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);
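
	/* Must zero key material before freeing */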
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int ret = -EINVAL;
	char dummy;
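
	/* Convert to crypto api definition? */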
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;
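
	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */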
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}
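
	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */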
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	ret = crypt_alloc_tfms(cc, cipher_api);
	if (ret < 0) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * Versions 2 and 3 of the Loop-AES format are recognised
		 * according to the length of the provided multi-key string.
		 * If present (version 3), the last key is used as IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}
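
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 *
 * For example (illustrative values only):
 *   crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0
 */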
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size, opt_params;
	unsigned long long tmpll;
	int ret;
	struct dm_arg_set as;
	const char *opt_string;
	char dummy;

	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature args"},
	};

	if (argc < 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	argv += 5;
	argc -= 5;
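
	/* Optional parameters */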
	if (argc) {
		as.argc = argc;
		as.argv = argv;

		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
		if (ret)
			goto bad;

		opt_string = dm_shift_arg(&as);

		if (opt_params == 1 && opt_string &&
		    !strcasecmp(opt_string, "allow_discards"))
			ti->num_discard_bios = 1;
		else if (opt_params) {
			ret = -EINVAL;
			ti->error = "Invalid feature arguments";
			goto bad;
		}
	}

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->discard_zeroes_data_unsupported = true;

	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc = ti->private;
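
	/*
	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_DISCARD caller must use flush if IO buffers possible
	 */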
	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static void crypt_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned i, sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0)
			for (i = 0; i < cc->key_size; i++)
				DMEMIT("%02x", cc->key[i]);
		else
			DMEMIT("-");

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);

		if (ti->num_discard_bios)
			DMEMIT(" 1 allow_discards");

		break;
	}
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
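
/* Message interface
 *	key set <key>
 *	key wipe
 */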
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strcasecmp(argv[0], "key")) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strcasecmp(argv[1], "set")) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 12, 1},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");