linux/crypto/xts.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
 *      http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

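/*
 * Illustrative usage sketch (not part of this file): a typical user
 * allocates the template by name and supplies a double-length key,
 * e.g. for AES-256 in XTS mode:
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, 64);
 *
 * The combined key is twice the size of the underlying cipher's key;
 * see xts_setkey() below.
 */

/*
 * Per-transform context: 'child' is the 'ecb(...)' skcipher that does
 * the actual data encryption (Key1); 'tweak' is a single-block cipher
 * that encrypts the IV into the initial tweak (Key2).
 */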
struct xts_tfm_ctx {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

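/*
 * Per-request context: 't' holds the current tweak, 'tail' and 'sg'
 * address the final block(s) for ciphertext stealing, and 'subreq' is
 * the request handed down to the child ECB transform.
 */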
struct xts_request_ctx {
	le128 t;
	struct scatterlist *tail;
	struct scatterlist sg[2];
	struct skcipher_request subreq;
};

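/*
 * The supplied key is the concatenation of two keys of equal size:
 * Key1 for the data cipher followed by Key2 for the tweak cipher.
 * xts_verify_key() rejects invalid combined key lengths.
 */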
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
		      unsigned int keylen)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/*
	 * We need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' IV) and the other one
	 * to encrypt and decrypt the data.
	 */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

/*
 * We compute the tweak masks twice (both before and after the ECB encryption
 * or decryption) to avoid having to allocate a temporary buffer and/or make
 * multiple calls to the 'ecb(..)' instance, which usually would be slower
 * than just doing the gf128mul_x_ble() calls again.
 */
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
			 bool enc)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	le128 t = rctx->t;
	int err;

	if (second_pass) {
		req = &rctx->subreq;
		/* set to our TFM to enforce correct alignment: */
		skcipher_request_set_tfm(req, tfm);
	}
	err = skcipher_walk_virt(&w, req, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

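		/*
		 * Ciphertext stealing special case: when a partial tail
		 * block follows, the last complete block and the tail use
		 * the final two tweaks in opposite order for encryption
		 * vs. decryption.  The tweak that xts_cts_final() must use
		 * is stashed in rctx->t on the second pass.
		 */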
		do {
			if (unlikely(cts) &&
			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
				if (!enc) {
					if (second_pass)
						rctx->t = t;
					gf128mul_x_ble(&t, &t);
				}
				le128_xor(wdst, &t, wsrc);
				if (enc && second_pass)
					gf128mul_x_ble(&rctx->t, &t);
				skcipher_walk_done(&w, avail - bs);
				return 0;
			}

			le128_xor(wdst++, &t, wsrc++);
			gf128mul_x_ble(&t, &t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	return err;
}

static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, false, enc);
}

static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
	return xts_xor_tweak(req, true, enc);
}

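/*
 * Completion callback for the ciphertext stealing subrequest issued by
 * xts_cts_final(): apply the final tweak XOR to the freshly processed
 * block, then complete the original request.
 */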
static void xts_cts_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	le128 b;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
		le128_xor(&b, &rctx->t, &b);
		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
	}

	skcipher_request_complete(req, err);
}

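/*
 * Handle a trailing partial block with ciphertext stealing: the partial
 * tail is padded with bytes stolen from the last complete block's output,
 * the combined block is run through the child cipher with the tweak saved
 * in rctx->t, and the result is written back so that the output has the
 * same length as the input.
 */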
static int xts_cts_final(struct skcipher_request *req,
			 int (*crypt)(struct skcipher_request *req))
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int tail = req->cryptlen % XTS_BLOCK_SIZE;
	le128 b[2];
	int err;

	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
				      offset - XTS_BLOCK_SIZE);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	b[1] = b[0];
	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);

	le128_xor(b, &rctx->t, b);

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
				      req);
	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
				   XTS_BLOCK_SIZE, NULL);

	err = crypt(subreq);
	if (err)
		return err;

	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
	le128_xor(b, &rctx->t, b);
	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);

	return 0;
}

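/*
 * Asynchronous completion handlers: once the ECB subrequest finishes,
 * redo the tweak XOR over the output (second pass) and, if the request
 * length is not a multiple of the block size, continue with ciphertext
 * stealing.  MAY_SLEEP is cleared before the second pass since the
 * completion may be invoked from atomic context.
 */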
static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, true);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_encrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;

	if (!err) {
		struct xts_request_ctx *rctx = skcipher_request_ctx(req);

		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		err = xts_xor_tweak_post(req, false);

		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
			err = xts_cts_final(req, crypto_skcipher_decrypt);
			if (err == -EINPROGRESS)
				return;
		}
	}

	skcipher_request_complete(req, err);
}

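/*
 * Common setup for encryption and decryption: requests shorter than one
 * block cannot be handled (even with ciphertext stealing), the ECB
 * subrequest is set up over the complete blocks, and the initial tweak
 * is computed by encrypting the IV with the tweak cipher.
 */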
static int xts_init_crypt(struct skcipher_request *req,
			  crypto_completion_t compl)
{
	const struct xts_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
	skcipher_request_set_crypt(subreq, req->dst, req->dst,
				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}

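/*
 * Encryption and decryption proper: XOR the tweaks into the data, run
 * the child 'ecb(...)' transform over the complete blocks, XOR the
 * tweaks again, and finish any partial tail with ciphertext stealing.
 */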
static int xts_encrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_encrypt_done) ?:
	      xts_xor_tweak_pre(req, true) ?:
	      crypto_skcipher_encrypt(subreq) ?:
	      xts_xor_tweak_post(req, true);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->subreq;
	int err;

	err = xts_init_crypt(req, xts_decrypt_done) ?:
	      xts_xor_tweak_pre(req, false) ?:
	      crypto_skcipher_decrypt(subreq) ?:
	      xts_xor_tweak_post(req, false);

	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
		return err;

	return xts_cts_final(req, crypto_skcipher_decrypt);
}

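/*
 * Instantiate the two underlying transforms for this tfm: the ECB
 * skcipher from the spawn and a single-block cipher for the tweak,
 * then reserve request context space for the subrequest.
 */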
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct xts_request_ctx));

	return 0;
}

static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}

static void xts_free_instance(struct skcipher_instance *inst)
{
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->spawn);
	kfree(inst);
}

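/*
 * Construct an "xts(cipher)" instance: the data path is grabbed as an
 * 'ecb(cipher)' skcipher (falling back to wrapping a bare cipher name
 * in "ecb(...)"), while ctx->name keeps the bare cipher name so that
 * xts_init_tfm() can allocate the matching tweak cipher.
 */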
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn,
					   skcipher_crypto_instance(inst),
					   ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_free_inst;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_free_inst;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_free_inst;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/*
	 * Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned int len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_free_inst;

		if (ctx->name[len - 1] != ')')
			goto err_free_inst;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_free_inst;
		}
	} else
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);

	inst->alg.init = xts_init_tfm;
	inst->alg.exit = xts_exit_tfm;

	inst->alg.setkey = xts_setkey;
	inst->alg.encrypt = xts_encrypt;
	inst->alg.decrypt = xts_decrypt;

	inst->free = xts_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		xts_free_instance(inst);
	}
	return err;
}


static struct crypto_template xts_tmpl = {
	.name = "xts",
	.create = xts_create,
	.module = THIS_MODULE,
};

static int __init xts_module_init(void)
{
	return crypto_register_template(&xts_tmpl);
}

static void __exit xts_module_exit(void)
{
	crypto_unregister_template(&xts_tmpl);
}

subsys_initcall(xts_module_init);
module_exit(xts_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);