linux/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

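/* Completion callback invoked after the CCP has processed a CMAC command:
 * stash any unhashed remainder for the next update call and, if the caller
 * supplied a result area, copy back the digest (the running intermediate
 * value lives in rctx->iv).
 */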
static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
                                 int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                unsigned int offset = rctx->nbytes - rctx->hash_rem;

                scatterwalk_map_and_copy(rctx->buf, rctx->src,
                                         offset, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else {
                rctx->buf_count = 0;
        }

        /* Update result area if supplied */
        if (req->result)
                memcpy(req->result, rctx->iv, digest_size);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

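/* Common worker for update/final/finup/digest.  Data is buffered until
 * more than one block is available; full blocks (minus a held-back
 * remainder on non-final calls) are then submitted to the CCP in a
 * single command.
 */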
static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
                              unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg, *cmac_key_sg = NULL;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int need_pad, sg_count;
        gfp_t gfp;
        u64 len;
        int ret;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        if (nbytes)
                rctx->null_msg = 0;

        len = (u64)rctx->buf_count + (u64)nbytes;

        if (!final && (len <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

        rctx->src = req->src;
        rctx->nbytes = nbytes;

        rctx->final = final;
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        rctx->hash_cnt = len - rctx->hash_rem;
        if (!final && !rctx->hash_rem) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

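        /* Per the CMAC spec, a message that is empty or not a multiple of
         * the block size must be padded and processed with subkey K2;
         * otherwise the last block is processed with subkey K1.
         */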
        if (final && (rctx->null_msg || (len & (block_size - 1))))
                need_pad = 1;
        else
                need_pad = 0;

        sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

        /* Build the data scatterlist table - allocate enough entries for all
         * possible data pieces (buffer, input data, padding)
         */
        sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                GFP_KERNEL : GFP_ATOMIC;
        ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
        if (ret)
                return ret;

        sg = NULL;
        if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

        if (nbytes) {
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }

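        /* CMAC padding is a single 0x80 byte followed by zeroes out to
         * the block boundary.
         */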
        if (need_pad) {
                int pad_length = block_size - (len & (block_size - 1));

                rctx->hash_cnt += pad_length;

                memset(rctx->pad, 0, sizeof(rctx->pad));
                rctx->pad[0] = 0x80;
                sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
        }
        if (sg) {
                sg_mark_end(sg);
                sg = rctx->data_sg.sgl;
        }

        /* Initialize the K1/K2 scatterlist */
        if (final)
                cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
                                         : &ctx->u.aes.k1_sg;

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_AES;
        rctx->cmd.u.aes.type = ctx->u.aes.type;
        rctx->cmd.u.aes.mode = ctx->u.aes.mode;
        rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
        rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.aes.iv = &rctx->iv_sg;
        rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
        rctx->cmd.u.aes.src = sg;
        rctx->cmd.u.aes.src_len = rctx->hash_cnt;
        rctx->cmd.u.aes.dst = NULL;
        rctx->cmd.u.aes.cmac_key = cmac_key_sg;
        rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
        rctx->cmd.u.aes.cmac_final = final;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

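/* Standard ahash entry points; all of the real work is done in
 * ccp_do_cmac_update() above.
 */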
static int ccp_aes_cmac_init(struct ahash_request *req)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        rctx->null_msg = 1;

        return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
        return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
        int ret;

        ret = ccp_aes_cmac_init(req);
        if (ret)
                return ret;

        return ccp_aes_cmac_finup(req);
}

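/* Export/import save and restore the partial hash state (the intermediate
 * value and any buffered bytes).  The state is staged through a local
 * struct because the caller's buffer may not be suitably aligned.
 */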
static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* Don't let anything leak to 'out' */
        memset(&state, 0, sizeof(state));

        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
        memcpy(state.buf, rctx->buf, sizeof(state.buf));

        /* 'out' may not be aligned so memcpy from local variable */
        memcpy(out, &state, sizeof(state));

        return 0;
}

static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;

        /* 'in' may not be aligned so memcpy to local variable */
        memcpy(&state, in, sizeof(state));

        memset(rctx, 0, sizeof(*rctx));
        rctx->null_msg = state.null_msg;
        memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
        rctx->buf_count = state.buf_count;
        memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

        return 0;
}

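/* Derive the CMAC subkeys per NIST SP 800-38B: K1 = double(AES-K(0^128))
 * and K2 = double(K1), where double() is a one-bit left shift with a
 * conditional XOR of Rb = 0x87 into the low byte when the shifted-out
 * bit was set.  The doubling is performed on two 64-bit halves below.
 */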
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                               unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
        u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
        u64 rb_hi = 0x00, rb_lo = 0x87;
        __be64 *gk;
        int ret;

        switch (key_len) {
        case AES_KEYSIZE_128:
                ctx->u.aes.type = CCP_AES_TYPE_128;
                break;
        case AES_KEYSIZE_192:
                ctx->u.aes.type = CCP_AES_TYPE_192;
                break;
        case AES_KEYSIZE_256:
                ctx->u.aes.type = CCP_AES_TYPE_256;
                break;
        default:
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        ctx->u.aes.mode = alg->mode;

        /* Set to zero until complete */
        ctx->u.aes.key_len = 0;

        /* Set the key for the AES cipher used to generate the keys */
        ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
        if (ret)
                return ret;

        /* Encrypt a block of zeroes - use key area in context */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
                                  ctx->u.aes.key);

        /* Generate K1 and K2 */
        k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
        k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

        k1_hi = (k0_hi << 1) | (k0_lo >> 63);
        k1_lo = k0_lo << 1;
        if (ctx->u.aes.key[0] & 0x80) {
                k1_hi ^= rb_hi;
                k1_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k1;
        *gk = cpu_to_be64(k1_hi);
        gk++;
        *gk = cpu_to_be64(k1_lo);

        k2_hi = (k1_hi << 1) | (k1_lo >> 63);
        k2_lo = k1_lo << 1;
        if (ctx->u.aes.k1[0] & 0x80) {
                k2_hi ^= rb_hi;
                k2_lo ^= rb_lo;
        }
        gk = (__be64 *)ctx->u.aes.k2;
        *gk = cpu_to_be64(k2_hi);
        gk++;
        *gk = cpu_to_be64(k2_lo);

        ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
        sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
        sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

        /* Save the supplied key */
        memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
        memcpy(ctx->u.aes.key, key, key_len);
        ctx->u.aes.key_len = key_len;
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return ret;
}

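/* Allocate the synchronous AES cipher that is used only to encrypt the
 * zero block during subkey generation in ccp_aes_cmac_setkey().
 */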
static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct crypto_cipher *cipher_tfm;

        ctx->complete = ccp_aes_cmac_complete;
        ctx->u.aes.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

        cipher_tfm = crypto_alloc_cipher("aes", 0,
                                         CRYPTO_ALG_ASYNC |
                                         CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(cipher_tfm)) {
                pr_warn("could not load aes cipher driver\n");
                return PTR_ERR(cipher_tfm);
        }
        ctx->u.aes.tfm_cipher = cipher_tfm;

        return 0;
}

static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->u.aes.tfm_cipher)
                crypto_free_cipher(ctx->u.aes.tfm_cipher);
        ctx->u.aes.tfm_cipher = NULL;
}

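/* Register cmac(aes) as an asynchronous, kernel-driver-only ahash with
 * the crypto API and track it on the caller's list for unregistration.
 */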
int ccp_register_aes_cmac_algs(struct list_head *head)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);
        ccp_alg->mode = CCP_AES_MODE_CMAC;

        alg = &ccp_alg->alg;
        alg->init = ccp_aes_cmac_init;
        alg->update = ccp_aes_cmac_update;
        alg->final = ccp_aes_cmac_final;
        alg->finup = ccp_aes_cmac_finup;
        alg->digest = ccp_aes_cmac_digest;
        alg->export = ccp_aes_cmac_export;
        alg->import = ccp_aes_cmac_import;
        alg->setkey = ccp_aes_cmac_setkey;

        halg = &alg->halg;
        halg->digestsize = AES_BLOCK_SIZE;
        halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
        base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = AES_BLOCK_SIZE;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_type = &crypto_ahash_type;
        base->cra_init = ccp_aes_cmac_cra_init;
        base->cra_exit = ccp_aes_cmac_cra_exit;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}