// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 *      http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption or
 * decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
 * just doing the gf128mul_x_ble() calls again.
 */
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

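/*
 * Completion callback for the async cipher call issued by xts_cts_final():
 * once the child cipher has processed the stolen final block, XOR its
 * output with the last tweak value and complete the original request.
 */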
static void xts_cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

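/*
 * Ciphertext-stealing path for messages whose length is not a multiple of
 * the block size: the partial tail is padded with bytes stolen from the
 * previously processed block, and that reassembled block is run through
 * the child cipher once more under the final tweak.
 */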
static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

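/*
 * Completion callback for the bulk ECB pass when it runs asynchronously:
 * apply the second tweak XOR pass and, if the request needs ciphertext
 * stealing, kick off the final-block handling.  xts_decrypt_done() below
 * is the mirror image for decryption.
 */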
static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

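/*
 * Common setup for encryption and decryption: prepare the ECB subrequest
 * covering all full blocks and derive the initial tweak T by encrypting
 * the IV with the tweak cipher (Key2).
 */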
static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

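/*
 * XTS is implemented as an xor-tweak pre-pass, an ecb(cipher) pass and an
 * xor-tweak post-pass, with an extra ciphertext-stealing step for any
 * trailing partial block.  xts_decrypt() follows the same structure.
 */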
static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}

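/*
 * Per-transform setup: instantiate the ecb(cipher) child skcipher and a
 * bare cipher of the same algorithm for tweak generation, and size the
 * request context to hold the child's subrequest.
 */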
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}

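/*
 * Template instantiation: "xts(cipher)" is built on top of "ecb(cipher)"
 * (or a driver that provides the ecb mode directly), plus a single-block
 * cipher for the tweak.  The instance name is rewritten so that it
 * advertises the underlying cipher rather than the ecb wrapper.
 */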
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}

static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");