linux/drivers/md/dm-crypt.c
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct dm_crypt_io {
        struct dm_target *target;
        struct bio *base_bio;
        struct work_struct work;
        atomic_t pending;
        int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
        struct bio *bio_in;
        struct bio *bio_out;
        unsigned int offset_in;
        unsigned int offset_out;
        unsigned int idx_in;
        unsigned int idx_out;
        sector_t sector;
        int write;
};

struct crypt_config;

struct crypt_iv_operations {
        int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
                   const char *opts);
        void (*dtr)(struct crypt_config *cc);
        const char *(*status)(struct crypt_config *cc);
        int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
        struct dm_dev *dev;
        sector_t start;

        /*
         * pool for per bio private data and
         * for encryption buffer pages
         */
        mempool_t *io_pool;
        mempool_t *page_pool;
        struct bio_set *bs;

        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
        union {
                struct crypto_cipher *essiv_tfm;
                int benbi_shift;
        } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;

        char cipher[CRYPTO_MAX_ALG_NAME];
        char chainmode[CRYPTO_MAX_ALG_NAME];
        struct crypto_blkcipher *tfm;
        unsigned long flags;
        unsigned int key_size;
        u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
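/*
 * Worked example (illustrative only, not used by the code): with a
 * 16-byte IV and sector number 5, "plain" yields
 *   05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 * (32-bit little-endian sector, zero padded), while "benbi" with a
 * 16-byte cipher block (benbi_shift = 9 - 4 = 5) stores the big-endian
 * count (5 << 5) + 1 = 161 = 0xa1 in the last eight bytes:
 *   00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a1
 * "null" leaves all bytes zero; "essiv" encrypts the 64-bit
 * little-endian sector number with a key derived by hashing the
 * bulk key.
 */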

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

        return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        struct crypto_cipher *essiv_tfm;
        struct crypto_hash *hash_tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        unsigned int saltsize;
        u8 *salt;
        int err;

        if (opts == NULL) {
                ti->error = "Digest algorithm missing for ESSIV mode";
                return -EINVAL;
        }

        /* Hash the cipher key with the given hash algorithm */
        hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                return PTR_ERR(hash_tfm);
        }

        saltsize = crypto_hash_digestsize(hash_tfm);
        salt = kmalloc(saltsize, GFP_KERNEL);
        if (salt == NULL) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                crypto_free_hash(hash_tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, cc->key, cc->key_size);
        desc.tfm = hash_tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
        crypto_free_hash(hash_tfm);

        if (err) {
                ti->error = "Error calculating hash in ESSIV";
                kfree(salt);
                return err;
        }

        /* Setup the essiv_tfm with the given salt */
        essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(essiv_tfm)) {
                ti->error = "Error allocating crypto tfm for ESSIV";
                kfree(salt);
                return PTR_ERR(essiv_tfm);
        }
        if (crypto_cipher_blocksize(essiv_tfm) !=
            crypto_blkcipher_ivsize(cc->tfm)) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return -EINVAL;
        }
        err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
        if (err) {
                ti->error = "Failed to set key for ESSIV cipher";
                crypto_free_cipher(essiv_tfm);
                kfree(salt);
                return err;
        }
        kfree(salt);

        cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
        crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
        cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
        crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
        return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
        unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
        int log = ilog2(bs);

        /* we need to calculate how far we must shift the sector count
         * to get the cipher block count; we use this shift in _gen */

        if (1 << log != bs) {
                ti->error = "cypher blocksize is not a power of 2";
                return -EINVAL;
        }

        if (log > 9) {
                ti->error = "cypher blocksize is > 512";
                return -EINVAL;
        }

        cc->iv_gen_private.benbi_shift = 9 - log;

        return 0;
}
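
/*
 * Illustrative check of the shift above (not executed anywhere): a
 * 16-byte cipher block gives log = 4, so benbi_shift = 9 - 4 = 5; a
 * 512-byte sector then spans 1 << 5 = 32 narrow blocks and sector N
 * starts at narrow-block count (N << 5) + 1.
 */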

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        __be64 val;

        memset(iv, 0, cc->iv_size - sizeof(u64)); /* the last u64 is written below */

        val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
        put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

        return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
        memset(iv, 0, cc->iv_size);

        return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
        .generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .ctr       = crypt_iv_essiv_ctr,
        .dtr       = crypt_iv_essiv_dtr,
        .generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
        .ctr       = crypt_iv_benbi_ctr,
        .dtr       = crypt_iv_benbi_dtr,
        .generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
        .generator = crypt_iv_null_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
        u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
                .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
        };
        int r;

        if (cc->iv_gen_ops) {
                r = cc->iv_gen_ops->generator(cc, iv, sector);
                if (r < 0)
                        return r;

                if (write)
                        r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
        } else {
                if (write)
                        r = crypto_blkcipher_encrypt(&desc, out, in, length);
                else
                        r = crypto_blkcipher_decrypt(&desc, out, in, length);
        }

        return r;
}

static void crypt_convert_init(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct bio *bio_out, struct bio *bio_in,
                               sector_t sector, int write)
{
        ctx->bio_in = bio_in;
        ctx->bio_out = bio_out;
        ctx->offset_in = 0;
        ctx->offset_out = 0;
        ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
        ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
        ctx->sector = sector + cc->iv_offset;
        ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
        int r = 0;

        while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
              ctx->idx_out < ctx->bio_out->bi_vcnt) {
                struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
                struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
                struct scatterlist sg_in, sg_out;

                sg_init_table(&sg_in, 1);
                sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in);

                sg_init_table(&sg_out, 1);
                sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out);

                ctx->offset_in += sg_in.length;
                if (ctx->offset_in >= bv_in->bv_len) {
                        ctx->offset_in = 0;
                        ctx->idx_in++;
                }

                ctx->offset_out += sg_out.length;
                if (ctx->offset_out >= bv_out->bv_len) {
                        ctx->offset_out = 0;
                        ctx->idx_out++;
                }

                r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
                                              ctx->write, ctx->sector);
                if (r < 0)
                        break;

                ctx->sector++;
        }

        return r;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
        struct dm_crypt_io *io = bio->bi_private;
        struct crypt_config *cc = io->target->private;

        bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
        struct crypt_config *cc = io->target->private;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned i, len;
        struct page *page;

        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
                return NULL;

        clone_init(io, clone);

        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page)
                        break;

                /*
                 * If additional pages cannot be allocated without waiting,
                 * return a partially allocated bio; the caller will then try
                 * to allocate additional bios while submitting this partial bio.
                 */
                if (i == (MIN_BIO_PAGES - 1))
                        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

                len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

                if (!bio_add_page(clone, page, len, 0)) {
                        mempool_free(page, cc->page_pool);
                        break;
                }

                size -= len;
        }

        if (!clone->bi_size) {
                bio_put(clone);
                return NULL;
        }

        return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
        unsigned int i;
        struct bio_vec *bv;

        for (i = 0; i < clone->bi_vcnt; i++) {
                bv = bio_iovec_idx(clone, i);
                BUG_ON(!bv->bv_page);
                mempool_free(bv->bv_page, cc->page_pool);
                bv->bv_page = NULL;
        }
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
        struct crypt_config *cc = (struct crypt_config *) io->target->private;

        if (error < 0)
                io->error = error;

        if (!atomic_dec_and_test(&io->pending))
                return;

        bio_endio(io->base_bio, io->error);

        mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
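
/*
 * Rough flow, summarising the code below (no new behaviour implied):
 *
 *   READ:  crypt_map -> kcryptd_queue_io -> process_read
 *            -> generic_make_request(clone) -> crypt_endio
 *            -> kcryptd_queue_crypt -> process_read_endio (decrypt)
 *
 *   WRITE: crypt_map -> kcryptd_queue_crypt -> process_write
 *            (encrypt into a freshly allocated clone)
 *            -> generic_make_request(clone) -> crypt_endio
 */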
static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_do_crypt(struct work_struct *work);

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(cc->io_queue, &io->work);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;

        INIT_WORK(&io->work, kcryptd_do_crypt);
        queue_work(cc->crypt_queue, &io->work);
}

static void crypt_endio(struct bio *clone, int error)
{
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->target->private;
        unsigned read_io = bio_data_dir(clone) == READ;

        if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
                error = -EIO;

        /*
         * free the processed pages
         */
        if (!read_io) {
                crypt_free_buffer_pages(cc, clone);
                goto out;
        }

        if (unlikely(error))
                goto out;

        bio_put(clone);
        kcryptd_queue_crypt(io);
        return;

out:
        bio_put(clone);
        crypt_dec_pending(io, error);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
        struct crypt_config *cc = io->target->private;

        clone->bi_private = io;
        clone->bi_end_io  = crypt_endio;
        clone->bi_bdev    = cc->dev->bdev;
        clone->bi_rw      = io->base_bio->bi_rw;
        clone->bi_destructor = dm_crypt_bio_destructor;
}

static void process_read(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
        clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
        if (unlikely(!clone)) {
                crypt_dec_pending(io, -ENOMEM);
                return;
        }

        clone_init(io, clone);
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);

        generic_make_request(clone);
}

static void process_write(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
        struct convert_context ctx;
        unsigned remaining = base_bio->bi_size;
        sector_t sector = base_bio->bi_sector - io->target->begin;

        atomic_inc(&io->pending);

        crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

        /*
         * The allocated buffers can be smaller than the whole bio,
         * so repeat the whole process until all the data can be handled.
         */
        while (remaining) {
                clone = crypt_alloc_buffer(io, remaining);
                if (unlikely(!clone)) {
                        crypt_dec_pending(io, -ENOMEM);
                        return;
                }

                ctx.bio_out = clone;
                ctx.idx_out = 0;

                if (unlikely(crypt_convert(cc, &ctx) < 0)) {
                        crypt_free_buffer_pages(cc, clone);
                        bio_put(clone);
                        crypt_dec_pending(io, -EIO);
                        return;
                }

                /* crypt_convert should have filled the clone bio */
                BUG_ON(ctx.idx_out < clone->bi_vcnt);

                clone->bi_sector = cc->start + sector;
                remaining -= clone->bi_size;
                sector += bio_sectors(clone);

                /* Grab another reference to the io struct
                 * before we kick off the request */
                if (remaining)
                        atomic_inc(&io->pending);

                generic_make_request(clone);

                /* Do not reference clone after this - it
                 * may be gone already. */

                /* out of memory -> run queues */
                if (remaining)
                        congestion_wait(WRITE, HZ/100);
        }
}

static void process_read_endio(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->target->private;
        struct convert_context ctx;

        crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
                           io->base_bio->bi_sector - io->target->begin, 0);

        crypt_dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read(io);
}

static void kcryptd_do_crypt(struct work_struct *work)
{
        struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

        if (bio_data_dir(io->base_bio) == READ)
                process_read_endio(io);
        else
                process_write(io);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
        char buffer[3];
        char *endp;
        unsigned int i;

        buffer[2] = '\0';

        for (i = 0; i < size; i++) {
                buffer[0] = *hex++;
                buffer[1] = *hex++;

                key[i] = (u8)simple_strtoul(buffer, &endp, 16);

                if (endp != &buffer[2])
                        return -EINVAL;
        }

        if (*hex != '\0')
                return -EINVAL;

        return 0;
}
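
/*
 * For example (illustrative), crypt_decode_key(key, "00a1fe", 3) yields
 * key[] = { 0x00, 0xa1, 0xfe }; crypt_encode_key() below performs the
 * reverse conversion.
 */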

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
        unsigned int i;

        for (i = 0; i < size; i++) {
                sprintf(hex, "%02x", *key);
                hex += 2;
                key++;
        }
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
        unsigned key_size = strlen(key) >> 1;

        if (cc->key_size && cc->key_size != key_size)
                return -EINVAL;

        cc->key_size = key_size; /* initial settings */

        if ((!key_size && strcmp(key, "-")) ||
           (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
                return -EINVAL;

        set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

        return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
        clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
        memset(&cc->key, 0, cc->key_size * sizeof(u8));
        return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
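/*
 * Example table line (illustrative; the device, sizes and the key shown
 * as <64 hex digits> are placeholders):
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb 0
 *
 * i.e. <cipher>-<chainmode>-<ivmode[:ivopts]>, the hex-encoded key,
 * iv_offset, the backing device and its start sector, typically loaded
 * with "dmsetup create" or managed through cryptsetup.
 */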
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct crypt_config *cc;
        struct crypto_blkcipher *tfm;
        char *tmp;
        char *cipher;
        char *chainmode;
        char *ivmode;
        char *ivopts;
        unsigned int key_size;
        unsigned long long tmpll;

        if (argc != 5) {
                ti->error = "Not enough arguments";
                return -EINVAL;
        }

        tmp = argv[0];
        cipher = strsep(&tmp, "-");
        chainmode = strsep(&tmp, "-");
        ivopts = strsep(&tmp, "-");
        ivmode = strsep(&ivopts, ":");

        if (tmp)
                DMWARN("Unexpected additional cipher options");

        key_size = strlen(argv[1]) >> 1;

        cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
        if (cc == NULL) {
                ti->error =
                        "Cannot allocate transparent encryption context";
                return -ENOMEM;
        }

        if (crypt_set_key(cc, argv[1])) {
                ti->error = "Error decoding key";
                goto bad_cipher;
        }

        /* Compatibility mode for old dm-crypt cipher strings */
        if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
                chainmode = "cbc";
                ivmode = "plain";
        }

        if (strcmp(chainmode, "ecb") && !ivmode) {
                ti->error = "This chaining mode requires an IV mechanism";
                goto bad_cipher;
        }

        if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
                ti->error = "Chain mode + cipher name is too long";
                goto bad_cipher;
        }

        tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                ti->error = "Error allocating crypto tfm";
                goto bad_cipher;
        }

        strcpy(cc->cipher, cipher);
        strcpy(cc->chainmode, chainmode);
        cc->tfm = tfm;

        /*
         * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
         * See comments at iv code
         */

        if (ivmode == NULL)
                cc->iv_gen_ops = NULL;
        else if (strcmp(ivmode, "plain") == 0)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
        else if (strcmp(ivmode, "benbi") == 0)
                cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else if (strcmp(ivmode, "null") == 0)
                cc->iv_gen_ops = &crypt_iv_null_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad_ivmode;
        }

        if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
            cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
                goto bad_ivmode;

        cc->iv_size = crypto_blkcipher_ivsize(tfm);
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
                                  (unsigned int)(sizeof(u64) / sizeof(u8)));
        else {
                if (cc->iv_gen_ops) {
                        DMWARN("Selected cipher does not support IVs");
                        if (cc->iv_gen_ops->dtr)
                                cc->iv_gen_ops->dtr(cc);
                        cc->iv_gen_ops = NULL;
                }
        }

        cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
        if (!cc->io_pool) {
                ti->error = "Cannot allocate crypt io mempool";
                goto bad_slab_pool;
        }

        cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad_page_pool;
        }

        cc->bs = bioset_create(MIN_IOS, MIN_IOS);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
        }

        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad_device;
        }

        if (sscanf(argv[2], "%llu", &tmpll) != 1) {
                ti->error = "Invalid iv_offset sector";
                goto bad_device;
        }
        cc->iv_offset = tmpll;

        if (sscanf(argv[4], "%llu", &tmpll) != 1) {
                ti->error = "Invalid device sector";
                goto bad_device;
        }
        cc->start = tmpll;

        if (dm_get_device(ti, argv[3], cc->start, ti->len,
                          dm_table_get_mode(ti->table), &cc->dev)) {
                ti->error = "Device lookup failed";
                goto bad_device;
        }

        if (ivmode && cc->iv_gen_ops) {
                if (ivopts)
                        *(ivopts - 1) = ':';
                cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
                if (!cc->iv_mode) {
                        ti->error = "Error kmallocing iv_mode string";
                        goto bad_ivmode_string;
                }
                strcpy(cc->iv_mode, ivmode);
        } else
                cc->iv_mode = NULL;

        cc->io_queue = create_singlethread_workqueue("kcryptd_io");
        if (!cc->io_queue) {
                ti->error = "Couldn't create kcryptd io queue";
                goto bad_io_queue;
        }

        cc->crypt_queue = create_singlethread_workqueue("kcryptd");
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad_crypt_queue;
        }

        ti->private = cc;
        return 0;

bad_crypt_queue:
        destroy_workqueue(cc->io_queue);
bad_io_queue:
        kfree(cc->iv_mode);
bad_ivmode_string:
        dm_put_device(ti, cc->dev);
bad_device:
        bioset_free(cc->bs);
bad_bs:
        mempool_destroy(cc->page_pool);
bad_page_pool:
        mempool_destroy(cc->io_pool);
bad_slab_pool:
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
bad_ivmode:
        crypto_free_blkcipher(tfm);
bad_cipher:
        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
        return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;

        destroy_workqueue(cc->io_queue);
        destroy_workqueue(cc->crypt_queue);

        bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->io_pool);

        kfree(cc->iv_mode);
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
        crypto_free_blkcipher(cc->tfm);
        dm_put_device(ti, cc->dev);

        /* Must zero key material before freeing */
        memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
        kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
                     union map_info *map_context)
{
        struct crypt_config *cc = ti->private;
        struct dm_crypt_io *io;

        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->error = 0;
        atomic_set(&io->pending, 0);

        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_queue_io(io);
        else
                kcryptd_queue_crypt(io);

        return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
{
        struct crypt_config *cc = (struct crypt_config *) ti->private;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                if (cc->iv_mode)
                        DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
                               cc->iv_mode);
                else
                        DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
                                return -ENOMEM;

                        crypt_encode_key(result + sz, cc->key, cc->key_size);
                        sz += cc->key_size << 1;
                } else {
                        if (sz >= maxlen)
                                return -ENOMEM;
                        result[sz++] = '-';
                }

                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);
                break;
        }
        return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
                DMERR("aborting resume - crypt key is not set.");
                return -EAGAIN;
        }

        return 0;
}

static void crypt_resume(struct dm_target *ti)
{
        struct crypt_config *cc = ti->private;

        clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *      key set <key>
 *      key wipe
 */
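/*
 * Example usage (illustrative; "cryptvol" and <hex key> are placeholders):
 *
 *   dmsetup message cryptvol 0 key wipe
 *   dmsetup message cryptvol 0 key set <hex key>
 *
 * The mapping must be suspended while the key is manipulated (see the
 * DM_CRYPT_SUSPENDED check below).
 */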
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct crypt_config *cc = ti->private;

        if (argc < 2)
                goto error;

        if (!strnicmp(argv[0], MESG_STR("key"))) {
                if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
                        DMWARN("not suspended during key manipulation.");
                        return -EINVAL;
                }
                if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
                        return crypt_set_key(cc, argv[2]);
                if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
                        return crypt_wipe_key(cc);
        }

error:
        DMWARN("unrecognised message received.");
        return -EINVAL;
}

static struct target_type crypt_target = {
        .name   = "crypt",
        .version= {1, 5, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
        .map    = crypt_map,
        .status = crypt_status,
        .postsuspend = crypt_postsuspend,
        .preresume = crypt_preresume,
        .resume = crypt_resume,
        .message = crypt_message,
};

static int __init dm_crypt_init(void)
{
        int r;

        _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
        if (!_crypt_io_pool)
                return -ENOMEM;

        r = dm_register_target(&crypt_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_crypt_io_pool);
        }

        return r;
}

static void __exit dm_crypt_exit(void)
{
        int r = dm_unregister_target(&crypt_target);

        if (r < 0)
                DMERR("unregister failed %d", r);

        kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");