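/*
 * dm-crypt: device-mapper target for transparent encryption / decryption
 * of block device data (reconstructed file header; the original copyright
 * banner was stripped, so only what the module macros below confirm is
 * restated here).
 *
 * This file is released under the GPL.
 */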
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

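/*
 * context holding the current state of a multi-part conversion
 */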
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

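/*
 * per bio private data
 */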
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

#define LMK_SEED_SIZE 64
struct iv_lmk_private {
	struct crypto_shash *hash_tfm;
	u8 *seed;
};

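/*
 * crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */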
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

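/*
 * Duplicated per-CPU state for cipher.
 */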
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};

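/*
 * The fields in here must be read only after initialization;
 * changing per-CPU state belongs in struct crypt_cpu.
 */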
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

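	/*
	 * Pools for per-bio private data, crypto requests and
	 * encryption buffer pages.
	 */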
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
		struct iv_lmk_private lmk;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

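	/*
	 * Duplicated per-CPU state.  Access through
	 * per_cpu_ptr() only.
	 */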
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

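	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */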
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};

#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

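/*
 * Use this to access cipher attributes that are the same for each CPU.
 */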
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

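/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use it for new devices.
 *
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block in a sector
 *      is special - it is the IV for the whole sector; the IV is
 *      recomputed and applied in post-processing on reads and taken
 *      from the plaintext data in pre-processing on writes.
 *
 * If you do not need the sector number as IV, use the null IV.
 */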
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

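/* Initialise ESSIV - compute salt but no local memory allocations */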
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}

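/* Wipe salt and reset key derived from volume key */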
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

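/* Set up per-CPU cipher state */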
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

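	/* Set up the essiv_tfm with the given salt */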
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

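	/* Allocate hash algorithm */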
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

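	/*
	 * We need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen.
	 */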
	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64));

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
		crypto_free_shash(lmk->hash_tfm);
	lmk->hash_tfm = NULL;

	kzfree(lmk->seed);
	lmk->seed = NULL;
}

static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
			    const char *opts)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(lmk->hash_tfm)) {
		ti->error = "Error initializing LMK hash";
		return PTR_ERR(lmk->hash_tfm);
	}

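	/* No seed in LMK version 2 */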
	if (cc->key_parts == cc->tfms_count) {
		lmk->seed = NULL;
		return 0;
	}

	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
	if (!lmk->seed) {
		crypt_iv_lmk_dtr(cc);
		ti->error = "Error kmallocing seed storage in LMK";
		return -ENOMEM;
	}

	return 0;
}

static int crypt_iv_lmk_init(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	int subkey_size = cc->key_size / cc->key_parts;

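	/* LMK seed is on the position of LMK_KEYS + 1 key */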
	if (lmk->seed)
		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
		       crypto_shash_digestsize(lmk->hash_tfm));

	return 0;
}

static int crypt_iv_lmk_wipe(struct crypt_config *cc)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;

	if (lmk->seed)
		memset(lmk->seed, 0, LMK_SEED_SIZE);

	return 0;
}

static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq,
			    u8 *data)
{
	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
	struct {
		struct shash_desc desc;
		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
	} sdesc;
	struct md5_state md5state;
	u32 buf[4];
	int i, r;

	sdesc.desc.tfm = lmk->hash_tfm;
	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	r = crypto_shash_init(&sdesc.desc);
	if (r)
		return r;

	if (lmk->seed) {
		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
		if (r)
			return r;
	}

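	/* Sector is always 512B, block size 16, add data of blocks 1-31 */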
	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
	if (r)
		return r;

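	/* Sector is cropped to 56 bits here */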
	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
	buf[2] = cpu_to_le32(4024);			/* sector size */
	buf[3] = 0;
	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
	if (r)
		return r;

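	/* No MD5 padding here */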
	r = crypto_shash_export(&sdesc.desc, &md5state);
	if (r)
		return r;

	for (i = 0; i < MD5_HASH_WORDS; i++)
		__cpu_to_le32s(&md5state.hash[i]);
	memcpy(iv, &md5state.hash, cc->iv_size);

	return 0;
}

static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
			    struct dm_crypt_request *dmreq)
{
	u8 *src;
	int r = 0;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
		kunmap_atomic(src, KM_USER0);
	} else
		memset(iv, 0, cc->iv_size);

	return r;
}

static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	u8 *dst;
	int r;

	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
		return 0;

	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);

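	/* Tweak the first block of plaintext sector */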
	if (!r)
		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);

	kunmap_atomic(dst, KM_USER0);
	return r;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.init = crypt_iv_essiv_init,
	.wipe = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr = crypt_iv_benbi_ctr,
	.dtr = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static struct crypt_iv_operations crypt_iv_lmk_ops = {
	.ctr = crypt_iv_lmk_ctr,
	.dtr = crypt_iv_lmk_dtr,
	.init = crypt_iv_lmk_init,
	.wipe = crypt_iv_lmk_wipe,
	.generator = crypt_iv_lmk_gen,
	.post = crypt_iv_lmk_post
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}

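/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */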
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

812
813static void dm_crypt_bio_destructor(struct bio *bio)
814{
815 struct dm_crypt_io *io = bio->bi_private;
816 struct crypt_config *cc = io->target->private;
817
818 bio_free(bio, cc->bs);
819}
820
821
822
823
824
825
826
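/*
 * Generate a new unfragmented bio with the given size.
 * This should never violate the device limitations.
 * May return a smaller bio when running out of pages, indicating
 * that the caller should be careful to specify how much
 * memory is needed.
 */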
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

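		/*
		 * If additional pages cannot be allocated without waiting,
		 * return a partially-allocated bio; the caller will then try
		 * to allocate more bios while submitting this partial bio.
		 */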
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

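/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, the error is propagated to it and the
 * last fragment drops its reference.
 */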
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

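/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated, as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */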
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_unplug(struct crypt_config *cc)
{
	blk_unplug(bdev_get_queue(cc->dev->bdev));
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

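	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */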
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone) {
		kcryptd_unplug(cc);
		return 1;
	}

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

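	/*
	 * Prevent io from disappearing until this function completes.
	 */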
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

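	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until completion.
	 */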
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

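/*
 * Decode key from its hex representation
 */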
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

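/*
 * Encode key into its hex representation
 */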
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* The key size may not be changed. */
	if (cc->key_size != (strlen(key) >> 1))
		return -EINVAL;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		return -EINVAL;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypt_setkey_allcpus(cc);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Crypto API style strings (e.g. "cbc(aes)") are not accepted here */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher strings are composed as
	 * cipher[:keycount]-mode-iv:ivopts
	 */
	tmp = cipher_in;
	keycount = strsep(&tmp, "-");
	cipher = strsep(&keycount, ":");

	if (!keycount)
		cc->tfms_count = 1;
	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
		 !is_power_of_2(cc->tfms_count)) {
		ti->error = "Bad cipher key count specification";
		return -EINVAL;
	}
	cc->key_parts = cc->tfms_count;

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
				 __alignof__(struct crypt_cpu));
	if (!cc->cpu) {
		ti->error = "Cannot allocate per cpu state";
		goto bad_mem;
	}

	/*
	 * For compatibility with the original dm-crypt mapping format, if
	 * only the cipher name is supplied, use cbc-plain.
	 */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else if (strcmp(ivmode, "lmk") == 0) {
		cc->iv_gen_ops = &crypt_iv_lmk_ops;
		/*
		 * LMK versions 2 and 3 are recognised according to the
		 * length of the provided multi-key string; if present
		 * (version 3), the last key is used as the IV seed.
		 */
		if (cc->key_size % cc->key_parts)
			cc->key_parts++;
	} else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc.) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

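/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 *
 * e.g. a dmsetup table line (illustrative values only):
 *   0 409600 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0
 */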
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

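/*
 * Message interface
 *	key set <key>
 *	key wipe
 */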
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name = "crypt",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr = crypt_ctr,
	.dtr = crypt_dtr,
	.map = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");