linux/arch/x86/crypto/morus640_glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The MORUS-640 Authenticated-Encryption Algorithm
 *   Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus640_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>

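/*
 * In-memory copy of the MORUS-640 internal state: MORUS_STATE_BLOCKS
 * blocks of MORUS640_BLOCK_SIZE bytes each, filled in and consumed by
 * the SIMD routines behind the morus640_glue_ops callbacks.
 */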
struct morus640_state {
        struct morus640_block s[MORUS_STATE_BLOCKS];
};

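/*
 * Per-direction dispatch table: encryption and decryption differ only in
 * which skcipher walk initializer and which bulk/tail transforms they use.
 */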
struct morus640_ops {
        int (*skcipher_walk_init)(struct skcipher_walk *walk,
                                  struct aead_request *req, bool atomic);

        void (*crypt_blocks)(void *state, const void *src, void *dst,
                             unsigned int length);
        void (*crypt_tail)(void *state, const void *src, void *dst,
                           unsigned int length);
};

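/*
 * Feed the associated data into the state one MORUS640_BLOCK_SIZE block
 * at a time.  Data arriving from the scatterlist in odd-sized pieces is
 * accumulated in 'buf' until a full block is available; a final partial
 * block is zero-padded before being absorbed.
 */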
static void crypto_morus640_glue_process_ad(
                struct morus640_state *state,
                const struct morus640_glue_ops *ops,
                struct scatterlist *sg_src, unsigned int assoclen)
{
        struct scatter_walk walk;
        struct morus640_block buf;
        unsigned int pos = 0;

        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
                unsigned int size = scatterwalk_clamp(&walk, assoclen);
                unsigned int left = size;
                void *mapped = scatterwalk_map(&walk);
                const u8 *src = (const u8 *)mapped;

                if (pos + size >= MORUS640_BLOCK_SIZE) {
                        if (pos > 0) {
                                unsigned int fill = MORUS640_BLOCK_SIZE - pos;
                                memcpy(buf.bytes + pos, src, fill);
                                ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
                                pos = 0;
                                left -= fill;
                                src += fill;
                        }

                        ops->ad(state, src, left);
                        src += left & ~(MORUS640_BLOCK_SIZE - 1);
                        left &= MORUS640_BLOCK_SIZE - 1;
                }

                memcpy(buf.bytes + pos, src, left);

                pos += left;
                assoclen -= size;
                scatterwalk_unmap(mapped);
                scatterwalk_advance(&walk, size);
                scatterwalk_done(&walk, 0, assoclen);
        }

        if (pos > 0) {
                memset(buf.bytes + pos, 0, MORUS640_BLOCK_SIZE - pos);
                ops->ad(state, buf.bytes, MORUS640_BLOCK_SIZE);
        }
}

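/*
 * Encrypt or decrypt the bulk of the message.  Full blocks are handed to
 * crypt_blocks(); whatever remainder the walk leaves (always less than
 * one block) is finished by crypt_tail().
 */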
static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
                                               struct morus640_ops ops,
                                               struct skcipher_walk *walk)
{
        while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
                ops.crypt_blocks(state, walk->src.virt.addr,
                                 walk->dst.virt.addr,
                                 round_down(walk->nbytes,
                                            MORUS640_BLOCK_SIZE));
                skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
        }

        if (walk->nbytes) {
                ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
                               walk->nbytes);
                skcipher_walk_done(walk, 0);
        }
}

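/* MORUS-640 takes a 128-bit key, i.e. exactly one block. */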
int crypto_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                unsigned int keylen)
{
        struct morus640_ctx *ctx = crypto_aead_ctx(aead);

        if (keylen != MORUS640_BLOCK_SIZE) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key.bytes, key, MORUS640_BLOCK_SIZE);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setkey);

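/* Any tag length up to MORUS_MAX_AUTH_SIZE bytes is acceptable. */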
int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_setauthsize);

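/*
 * Common encrypt/decrypt path: initialize the state from key and IV,
 * absorb the associated data, transform the payload and compute the tag,
 * all inside one kernel_fpu_begin()/kernel_fpu_end() section.  The walk
 * is set up before entering the FPU section; note the 'atomic' flag is
 * true, since the walk then proceeds with the FPU context held.
 */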
static void crypto_morus640_glue_crypt(struct aead_request *req,
                                       struct morus640_ops ops,
                                       unsigned int cryptlen,
                                       struct morus640_block *tag_xor)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_state state;
        struct skcipher_walk walk;

        ops.skcipher_walk_init(&walk, req, true);

        kernel_fpu_begin();

        ctx->ops->init(&state, &ctx->key, req->iv);
        crypto_morus640_glue_process_ad(&state, ctx->ops, req->src,
                                        req->assoclen);
        crypto_morus640_glue_process_crypt(&state, ops, &walk);
        ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

        kernel_fpu_end();
}

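/*
 * AEAD encryption: transform the plaintext, then append the tag after the
 * ciphertext in the destination scatterlist.  'tag' starts out zeroed, so
 * the final() XOR leaves the raw authentication tag in it.
 */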
int crypto_morus640_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_encrypt,
                .crypt_blocks = ctx->ops->enc,
                .crypt_tail = ctx->ops->enc_tail,
        };

        struct morus640_block tag = {};
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen;

        crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);

        scatterwalk_map_and_copy(tag.bytes, req->dst,
                                 req->assoclen + cryptlen, authsize, 1);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_encrypt);

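/*
 * AEAD decryption: read the expected tag from the end of the source,
 * decrypt, and let final() XOR the computed tag into the expected one;
 * the result is all-zero bytes iff the tags match.
 */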
int crypto_morus640_glue_decrypt(struct aead_request *req)
{
        static const u8 zeros[MORUS640_BLOCK_SIZE] = {};

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus640_ops ops = {
                .skcipher_walk_init = skcipher_walk_aead_decrypt,
                .crypt_blocks = ctx->ops->dec,
                .crypt_tail = ctx->ops->dec_tail,
        };

        struct morus640_block tag;
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen - authsize;

        scatterwalk_map_and_copy(tag.bytes, req->src,
                                 req->assoclen + cryptlen, authsize, 0);

        crypto_morus640_glue_crypt(req, ops, cryptlen, &tag);

        return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_decrypt);

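/*
 * Called by the ISA-specific wrapper modules (e.g. the SSE2 implementation)
 * to plug their SIMD routines into this common glue layer.  A minimal
 * sketch of a caller, assuming a hypothetical 'crypto_morus640_hypo_ops'
 * table exported by the SIMD code:
 *
 *	static int crypto_morus640_hypo_init_tfm(struct crypto_aead *tfm)
 *	{
 *		crypto_morus640_glue_init_ops(tfm, &crypto_morus640_hypo_ops);
 *		return 0;
 *	}
 */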
void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
                                   const struct morus640_glue_ops *ops)
{
        struct morus640_ctx *ctx = crypto_aead_ctx(aead);

        ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");