linux/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

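/*
 * Completion callback, run when the CCP has finished (or failed) an
 * AES-CMAC operation: any trailing bytes that were not hashed are copied
 * back into the request buffer for the next update, and for a final
 * operation the computed MAC (carried in the IV area) is copied to the
 * caller's result buffer.
 */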
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
                                 int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                unsigned int offset = rctx->nbytes - rctx->hash_rem;

                scatterwalk_map_and_copy(rctx->buf, rctx->src,
                                         offset, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else {
                rctx->buf_count = 0;
        }

        /* Update result area if supplied */
        if (req->result && rctx->final)
                memcpy(req->result, rctx->iv, digest_size);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
                              unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg, *cmac_key_sg = NULL;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int need_pad, sg_count;
        gfp_t gfp;
        u64 len;
        int ret;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        if (nbytes)
                rctx->null_msg = 0;

        len = (u64)rctx->buf_count + (u64)nbytes;

        if (!final && (len <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

        rctx->src = req->src;
        rctx->nbytes = nbytes;

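        /*
         * Work out how much of the data can be hashed now (hash_cnt) and
         * how many trailing bytes must be held back (hash_rem); the
         * held-back bytes are copied into rctx->buf by the completion
         * callback so they can be combined with a later update.
         */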
        rctx->final = final;
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        rctx->hash_cnt = len - rctx->hash_rem;
        if (!final && !rctx->hash_rem) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

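        /*
         * Padding is required on the final operation when the message is
         * empty or its length is not a multiple of the AES block size.
         */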
        if (final && (rctx->null_msg || (len & (block_size - 1))))
                need_pad = 1;
        else
                need_pad = 0;

        sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

        /* Build the data scatterlist table - allocate enough entries for all
         * possible data pieces (buffer, input data, padding)
         */
        sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;
        ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
        if (ret)
                return ret;

        sg = NULL;
        if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

        if (nbytes) {
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

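        /*
         * Pad an incomplete final block as specified for CMAC: a single
         * 0x80 byte (a 1 bit) followed by zeros out to the block size.
         */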
        if (need_pad) {
                int pad_length = block_size - (len & (block_size - 1));

                rctx->hash_cnt += pad_length;

                memset(rctx->pad, 0, sizeof(rctx->pad));
                rctx->pad[0] = 0x80;
                sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }
        if (sg) {
                sg_mark_end(sg);
                sg = rctx->data_sg.sgl;
        }

        /* Initialize the K1/K2 scatterlist */
        if (final)
                cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
                                         : &ctx->u.aes.k1_sg;

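        /*
         * Build the CCP command: the AES engine runs in CMAC mode, the
         * running MAC value is carried in the IV field, and on the final
         * operation cmac_key supplies K1 (complete last block) or K2
         * (padded last block).
         */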
        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
        rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.aes.iv = &rctx->iv_sg;
        rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
        rctx->cmd.u.aes.src = sg;
        rctx->cmd.u.aes.src_len = rctx->hash_cnt;
        rctx->cmd.u.aes.dst = NULL;
        rctx->cmd.u.aes.cmac_key = cmac_key_sg;
        rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
        rctx->cmd.u.aes.cmac_final = final;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

static int ccp_aes_cmac_init(struct ahash_request *req)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        rctx->null_msg = 1;

        return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
        int ret;

        ret = ccp_aes_cmac_init(req);
        if (ret)
                return ret;

        return ccp_aes_cmac_finup(req);
}

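/*
 * Export/import the partial hash state (empty-message flag, running MAC
 * value and any buffered bytes) so that an in-progress request can be
 * serialized and resumed later, as required by the ahash API.
 */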
static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* Don't let anything leak to 'out' */
        memset(&state, 0, sizeof(state));

        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
        memcpy(state.buf, rctx->buf, sizeof(state.buf));

        /* 'out' may not be aligned so memcpy from local variable */
        memcpy(out, &state, sizeof(state));

        return 0;
}

static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* 'in' may not be aligned so memcpy to local variable */
        memcpy(&state, in, sizeof(state));

        memset(rctx, 0, sizeof(*rctx));
        rctx->null_msg = state.null_msg;
        memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
        rctx->buf_count = state.buf_count;
        memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

        return 0;
}

static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
        u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
        u64 rb_hi = 0x00, rb_lo = 0x87;
        struct crypto_aes_ctx aes;
        __be64 *gk;
        int ret;

        switch (key_len) {
        case AES_KEYSIZE_128:
                ctx->u.aes.type = CCP_AES_TYPE_128;
                break;
        case AES_KEYSIZE_192:
                ctx->u.aes.type = CCP_AES_TYPE_192;
                break;
        case AES_KEYSIZE_256:
                ctx->u.aes.type = CCP_AES_TYPE_256;
                break;
        default:
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->u.aes.mode = alg->mode;

        /* Set to zero until complete */
        ctx->u.aes.key_len = 0;

        /* Set the key for the AES cipher used to generate the keys */
        ret = aes_expandkey(&aes, key, key_len);
        if (ret)
                return ret;

        /* Encrypt a block of zeroes - use key area in context */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key);
        memzero_explicit(&aes, sizeof(aes));

        /*
         * Generate the K1 and K2 subkeys: starting from K0 (the encryption
         * of an all-zero block), each subkey is the previous value doubled
         * in GF(2^128), i.e. shifted left by one bit with the constant
         * Rb = 0x87 XORed into the low byte when the shifted-out bit was
         * set.  The 128-bit values are handled here as two 64-bit halves.
         */
        k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
        k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

        k1_hi = (k0_hi << 1) | (k0_lo >> 63);
        k1_lo = k0_lo << 1;
        if (ctx->u.aes.key[0] & 0x80) {
                k1_hi ^= rb_hi;
                k1_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k1;
        *gk = cpu_to_be64(k1_hi);
        gk++;
        *gk = cpu_to_be64(k1_lo);

        k2_hi = (k1_hi << 1) | (k1_lo >> 63);
        k2_lo = k1_lo << 1;
        if (ctx->u.aes.k1[0] & 0x80) {
                k2_hi ^= rb_hi;
                k2_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k2;
        *gk = cpu_to_be64(k2_hi);
        gk++;
        *gk = cpu_to_be64(k2_lo);

        ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
        sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
        sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

        /* Save the supplied key */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        memcpy(ctx->u.aes.key, key, key_len);
        ctx->u.aes.key_len = key_len;
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return ret;
}

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

        ctx->complete = ccp_aes_cmac_complete;
        ctx->u.aes.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

        return 0;
}

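/*
 * The algorithm registered below is reached through the normal ahash API.
 * A rough sketch of how a caller might use it (error handling omitted;
 * names such as done_cb, done_ctx, src_sg, mac and src_len are
 * placeholders, and the request completes asynchronously, so -EINPROGRESS
 * must be handled via the callback):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 done_cb, &done_ctx);
 *      ahash_request_set_crypt(req, src_sg, mac, src_len);
 *      crypto_ahash_digest(req);
 */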
int ccp_register_aes_cmac_algs(struct list_head *head)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);
        ccp_alg->mode = CCP_AES_MODE_CMAC;

        alg = &ccp_alg->alg;
        alg->init = ccp_aes_cmac_init;
        alg->update = ccp_aes_cmac_update;
        alg->final = ccp_aes_cmac_final;
        alg->finup = ccp_aes_cmac_finup;
        alg->digest = ccp_aes_cmac_digest;
        alg->export = ccp_aes_cmac_export;
        alg->import = ccp_aes_cmac_import;
        alg->setkey = ccp_aes_cmac_setkey;

        halg = &alg->halg;
        halg->digestsize = AES_BLOCK_SIZE;
        halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
        base->cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = AES_BLOCK_SIZE;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_init = ccp_aes_cmac_cra_init;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}