linux/arch/x86/crypto/glue_helper.c
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>

static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
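
/*
 * Example usage (illustrative sketch, not part of this file): a cipher
 * module declares a common_glue_ctx listing its batch routines from the
 * largest num_blocks down to a mandatory one-block entry, then wires
 * glue_ecb_crypt_128bit() into its blkcipher handlers. The
 * twofish_enc_blk*() names below are stand-ins modeled on
 * twofish_glue_3way.c, not symbols defined here.
 */
#if 0	/* illustrative only */
static const struct common_glue_ctx twofish_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = -1,	/* -1: cipher does not need the FPU */

	.funcs = { {
		.num_blocks = 3,	/* 3-way parallel batch, tried first */
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
	}, {
		.num_blocks = 1,	/* one-block fallback, must be last */
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
	} }
};

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
}
#endif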

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
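
/*
 * Example usage (illustrative sketch): CBC encryption is inherently
 * sequential, so only a single-block encrypt routine is needed and no
 * common_glue_ctx is involved. twofish_enc_blk() is again a stand-in
 * name modeled on twofish_glue_3way.c.
 */
#if 0	/* illustrative only */
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
				       dst, src, nbytes);
}
#endif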

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/*
	 * Decrypt from the last block towards the first, so that for
	 * in-place operation each ciphertext block is still intact when
	 * it is needed as the chaining value of the block after it.
	 */

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
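
/*
 * Example usage (illustrative sketch): unlike encryption, CBC decryption
 * parallelizes, so the glue context carries .cbc batch routines, ordered
 * from the largest batch down to a one-block entry. The names are
 * stand-ins modeled on twofish_glue_3way.c.
 */
#if 0	/* illustrative only */
static const struct common_glue_ctx twofish_dec_cbc = {
	.num_funcs = 2,
	.fpu_blocks_limit = -1,

	.funcs = { {
		.num_blocks = 3,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
	} }
};

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
				       nbytes);
}
#endif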

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Handle the final partial block in a full-size stack temporary;
	 * only the bytes actually requested are copied back out. */
	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		/* The last funcs entry must be the one-block routine; it
		 * handles the trailing partial block here. */
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
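
/*
 * Example usage (illustrative sketch): each .ctr entry encrypts a batch of
 * counter blocks and XORs them into the data, advancing the counter as it
 * goes; the one-block entry doubles as the partial-block handler above.
 * Names are stand-ins modeled on twofish_glue_3way.c.
 */
#if 0	/* illustrative only */
static const struct common_glue_ctx twofish_ctr = {
	.num_funcs = 2,
	.fpu_blocks_limit = -1,

	.funcs = { {
		.num_blocks = 3,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
	} }
};

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
}
#endif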

static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

/* for implementations providing a faster XTS IV (tweak) generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
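
/*
 * Example usage (illustrative sketch): an XTS caller passes separate tweak
 * and crypt contexts plus a plain single-block encrypt function used to
 * generate the first tweak. The serpent_* names and the serpent_xts_ctx
 * layout are stand-ins modeled on serpent_avx_glue.c; XTS_TWEAK_CAST
 * comes from <crypto/xts.h>.
 */
#if 0	/* illustrative only */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(__serpent_encrypt),
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}
#endif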

void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	le128_gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
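
/*
 * Example usage (illustrative sketch): a cipher's one-block .xts handler
 * simply forwards to glue_xts_crypt_128bit_one() with its single-block
 * primitive. The serpent names are stand-ins modeled on serpent_avx_glue.c.
 */
#if 0	/* illustrative only */
static void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv,
				  GLUE_FUNC_CAST(__serpent_encrypt));
}
#endif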

MODULE_LICENSE("GPL");