linux/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
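
/*
 * The transform registered below is exposed through the kernel crypto
 * API as "cmac(aes)".  A minimal usage sketch (illustrative only, not
 * part of this driver; error handling omitted):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
 *	... allocate an ahash_request, set src/result, then ...
 *	crypto_ahash_digest(req);
 *	crypto_free_ahash(tfm);
 */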

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
                                 int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                unsigned int offset = rctx->nbytes - rctx->hash_rem;

                scatterwalk_map_and_copy(rctx->buf, rctx->src,
                                         offset, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else {
                rctx->buf_count = 0;
        }

        /*
         * Update the result area if supplied - the CCP returns the
         * computed MAC through the IV area
         */
        if (req->result && rctx->final)
                memcpy(req->result, rctx->iv, digest_size);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
                              unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg, *cmac_key_sg = NULL;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int need_pad, sg_count;
        gfp_t gfp;
        u64 len;
        int ret;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        if (nbytes)
                rctx->null_msg = 0;

        len = (u64)rctx->buf_count + (u64)nbytes;

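        /*
         * If this is not a final operation and the data still fits within
         * a single block, just buffer it for a later operation; the CCP
         * is only invoked once more than a block of data is available
         */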
        if (!final && (len <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

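        /*
         * hash_cnt is the block-aligned number of bytes handed to the
         * CCP now; the hash_rem remainder is saved into rctx->buf by the
         * completion callback for use in a subsequent operation
         */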
        rctx->src = req->src;
        rctx->nbytes = nbytes;

        rctx->final = final;
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        rctx->hash_cnt = len - rctx->hash_rem;
        if (!final && !rctx->hash_rem) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

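        /*
         * Per the CMAC specification (NIST SP 800-38B), padding (and
         * therefore subkey K2) is required when the message is empty or
         * its length is not a multiple of the block size
         */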
        if (final && (rctx->null_msg || (len & (block_size - 1))))
                need_pad = 1;
        else
                need_pad = 0;

        sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

        /* Build the data scatterlist table - allocate enough entries for all
         * possible data pieces (buffer, input data, padding)
         */
        sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;
        ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
        if (ret)
                return ret;

        sg = NULL;
        if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

        if (nbytes) {
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

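        /*
         * Apply the CMAC "10*" padding: a single 0x80 byte (a 1 bit)
         * followed by zeroes out to the block boundary
         */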
        if (need_pad) {
                int pad_length = block_size - (len & (block_size - 1));

                rctx->hash_cnt += pad_length;

                memset(rctx->pad, 0, sizeof(rctx->pad));
                rctx->pad[0] = 0x80;
                sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }
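        /* Terminate the assembled scatterlist and use its head as the source */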
        if (sg) {
                sg_mark_end(sg);
                sg = rctx->data_sg.sgl;
        }

        /*
         * Initialize the K1/K2 scatterlist - K1 is used when the final
         * block is complete, K2 when the final block required padding
         */
        if (final)
                cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
                                         : &ctx->u.aes.k1_sg;
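        /*
         * Fill in the CCP command: CMAC is performed on the AES engine
         * as an encrypt operation, with the running MAC carried in the
         * IV area of the request context
         */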
        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
        rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.aes.iv = &rctx->iv_sg;
        rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
        rctx->cmd.u.aes.src = sg;
        rctx->cmd.u.aes.src_len = rctx->hash_cnt;
        rctx->cmd.u.aes.dst = NULL;
        rctx->cmd.u.aes.cmac_key = cmac_key_sg;
        rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
        rctx->cmd.u.aes.cmac_final = final;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

static int ccp_aes_cmac_init(struct ahash_request *req)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        rctx->null_msg = 1;

        return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
        int ret;

        ret = ccp_aes_cmac_init(req);
        if (ret)
                return ret;

        return ccp_aes_cmac_finup(req);
}

static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* Don't let anything leak to 'out' */
        memset(&state, 0, sizeof(state));

        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
        memcpy(state.buf, rctx->buf, sizeof(state.buf));

        /* 'out' may not be aligned so memcpy from local variable */
        memcpy(out, &state, sizeof(state));

        return 0;
}

static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* 'in' may not be aligned so memcpy to local variable */
        memcpy(&state, in, sizeof(state));

        memset(rctx, 0, sizeof(*rctx));
        rctx->null_msg = state.null_msg;
        memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
        rctx->buf_count = state.buf_count;
        memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

        return 0;
}

static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
        u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
        u64 rb_hi = 0x00, rb_lo = 0x87;
        struct crypto_aes_ctx aes;
        __be64 *gk;
        int ret;

        switch (key_len) {
        case AES_KEYSIZE_128:
                ctx->u.aes.type = CCP_AES_TYPE_128;
                break;
        case AES_KEYSIZE_192:
                ctx->u.aes.type = CCP_AES_TYPE_192;
                break;
        case AES_KEYSIZE_256:
                ctx->u.aes.type = CCP_AES_TYPE_256;
                break;
        default:
                return -EINVAL;
        }
        ctx->u.aes.mode = alg->mode;

        /* Set to zero until complete */
        ctx->u.aes.key_len = 0;

        /* Set the key for the AES cipher used to generate the keys */
        ret = aes_expandkey(&aes, key, key_len);
        if (ret)
                return ret;

        /* Encrypt a block of zeroes - use key area in context */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key);
        memzero_explicit(&aes, sizeof(aes));

        /*
         * Generate subkeys K1 and K2 per NIST SP 800-38B: K1 = dbl(K0)
         * and K2 = dbl(K1), where K0 is the encrypted zero block and
         * dbl() doubles a value in GF(2^128) - a left shift by one bit
         * with a conditional XOR of the constant Rb = 0x87 when the top
         * bit was set.  The 128-bit values are handled as big-endian
         * 64-bit halves.
         */
        k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
        k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

        k1_hi = (k0_hi << 1) | (k0_lo >> 63);
        k1_lo = k0_lo << 1;
        if (ctx->u.aes.key[0] & 0x80) {
                k1_hi ^= rb_hi;
                k1_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k1;
        *gk = cpu_to_be64(k1_hi);
        gk++;
        *gk = cpu_to_be64(k1_lo);

        k2_hi = (k1_hi << 1) | (k1_lo >> 63);
        k2_lo = k1_lo << 1;
        if (ctx->u.aes.k1[0] & 0x80) {
                k2_hi ^= rb_hi;
                k2_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k2;
        *gk = cpu_to_be64(k2_hi);
        gk++;
        *gk = cpu_to_be64(k2_lo);

        ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
        sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
        sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

        /* Save the supplied key */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        memcpy(ctx->u.aes.key, key, key_len);
        ctx->u.aes.key_len = key_len;
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return ret;
}

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

        ctx->complete = ccp_aes_cmac_complete;
        ctx->u.aes.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

        return 0;
}

int ccp_register_aes_cmac_algs(struct list_head *head)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);
        ccp_alg->mode = CCP_AES_MODE_CMAC;

        alg = &ccp_alg->alg;
        alg->init = ccp_aes_cmac_init;
        alg->update = ccp_aes_cmac_update;
        alg->final = ccp_aes_cmac_final;
        alg->finup = ccp_aes_cmac_finup;
        alg->digest = ccp_aes_cmac_digest;
        alg->export = ccp_aes_cmac_export;
        alg->import = ccp_aes_cmac_import;
        alg->setkey = ccp_aes_cmac_setkey;

        halg = &alg->halg;
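        /* The CMAC digest is a full AES block (16 bytes) */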
        halg->digestsize = AES_BLOCK_SIZE;
        halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
        base->cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_ALLOCATES_MEMORY |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = AES_BLOCK_SIZE;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_init = ccp_aes_cmac_cra_init;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}