linux/drivers/crypto/ccree/cc_aead.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/gcm.h>
#include <linux/rtnetlink.h>
#include <crypto/internal/des.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead   template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

struct cc_aead_handle {
        u32 sram_workspace_addr;
        struct list_head aead_list;
};

struct cc_hmac_s {
        u8 *padded_authkey;
        u8 *ipad_opad; /* IPAD, OPAD */
        dma_addr_t padded_authkey_dma_addr;
        dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
        u8 *xcbc_keys; /* K1, K2, K3 */
        dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
        struct cc_drvdata *drvdata;
        u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
        u8 *enckey;
        dma_addr_t enckey_dma_addr;
        union {
                struct cc_hmac_s hmac;
                struct cc_xcbc_s xcbc;
        } auth_state;
        unsigned int enc_keylen;
        unsigned int auth_keylen;
        unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
        unsigned int hash_len;
        enum drv_cipher_mode cipher_mode;
        enum cc_flow_mode flow_mode;
        enum drv_hash_mode auth_mode;
};

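/*
 * Transform cleanup: release the DMA-coherent buffers holding the key
 * material (enckey, plus either the XCBC K1/K2/K3 keys or the HMAC
 * ipad/opad and padded authkey, depending on the authentication mode).
 */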
static void cc_aead_exit(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
                crypto_tfm_alg_name(&tfm->base));

        /* Unmap enckey buffer */
        if (ctx->enckey) {
                dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
                                  ctx->enckey_dma_addr);
                dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
                        &ctx->enckey_dma_addr);
                ctx->enckey_dma_addr = 0;
                ctx->enckey = NULL;
        }

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

                if (xcbc->xcbc_keys) {
                        dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
                                          xcbc->xcbc_keys,
                                          xcbc->xcbc_keys_dma_addr);
                }
                dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
                        &xcbc->xcbc_keys_dma_addr);
                xcbc->xcbc_keys_dma_addr = 0;
                xcbc->xcbc_keys = NULL;
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

                if (hmac->ipad_opad) {
                        dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
                                          hmac->ipad_opad,
                                          hmac->ipad_opad_dma_addr);
                        dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
                                &hmac->ipad_opad_dma_addr);
                        hmac->ipad_opad_dma_addr = 0;
                        hmac->ipad_opad = NULL;
                }
                if (hmac->padded_authkey) {
                        dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
                                          hmac->padded_authkey,
                                          hmac->padded_authkey_dma_addr);
                        dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
                                &hmac->padded_authkey_dma_addr);
                        hmac->padded_authkey_dma_addr = 0;
                        hmac->padded_authkey = NULL;
                }
        }
}

static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        return cc_get_default_hash_len(ctx->drvdata);
}

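/*
 * One-time transform setup: copy the cipher/flow/auth modes from the
 * algorithm template into the context and allocate the DMA-coherent key
 * buffers that cc_aead_exit() later frees.
 */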
static int cc_aead_init(struct crypto_aead *tfm)
{
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_alg *cc_alg =
                        container_of(alg, struct cc_crypto_alg, aead_alg);
        struct device *dev = drvdata_to_dev(cc_alg->drvdata);

        dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
                crypto_tfm_alg_name(&tfm->base));

        /* Initialize modes in instance */
        ctx->cipher_mode = cc_alg->cipher_mode;
        ctx->flow_mode = cc_alg->flow_mode;
        ctx->auth_mode = cc_alg->auth_mode;
        ctx->drvdata = cc_alg->drvdata;
        crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

        /* Allocate key buffer, cache line aligned */
        ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
                                         &ctx->enckey_dma_addr, GFP_KERNEL);
        if (!ctx->enckey) {
                dev_err(dev, "Failed allocating key buffer\n");
                goto init_failed;
        }
        dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
                ctx->enckey);

        /* Set default authlen value */

        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
                struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
                const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

                /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
                /* (and temporary for user key - up to 256b) */
                xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
                                                     &xcbc->xcbc_keys_dma_addr,
                                                     GFP_KERNEL);
                if (!xcbc->xcbc_keys) {
                        dev_err(dev, "Failed allocating buffer for XCBC keys\n");
                        goto init_failed;
                }
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
                struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
                const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
                dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

                /* Allocate dma-coherent buffer for IPAD + OPAD */
                hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
                                                     &hmac->ipad_opad_dma_addr,
                                                     GFP_KERNEL);

                if (!hmac->ipad_opad) {
                        dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
                        goto init_failed;
                }

                dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
                        hmac->ipad_opad);

                hmac->padded_authkey = dma_alloc_coherent(dev,
                                                          MAX_HMAC_BLOCK_SIZE,
                                                          pkey_dma,
                                                          GFP_KERNEL);

                if (!hmac->padded_authkey) {
                        dev_err(dev, "failed to allocate padded_authkey\n");
                        goto init_failed;
                }
        } else {
                ctx->auth_state.hmac.ipad_opad = NULL;
                ctx->auth_state.hmac.padded_authkey = NULL;
        }
        ctx->hash_len = cc_get_aead_hash_len(tfm);

        return 0;

init_failed:
        cc_aead_exit(tfm);
        return -ENOMEM;
}

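/*
 * Completion callback invoked by the request manager. On decrypt, the
 * ICV computed by the HW (mac_buf) is compared against the ICV taken
 * from the ciphertext; on mismatch the plaintext output is zeroed and
 * -EBADMSG returned. On encrypt, a fragmented ICV is copied from
 * mac_buf back into the destination scatterlist.
 */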
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
        struct aead_request *areq = (struct aead_request *)cc_req;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        /* BACKLOG notification */
        if (err == -EINPROGRESS)
                goto done;

        cc_unmap_aead_request(dev, areq);

        /* Restore ordinary iv pointer */
        areq->iv = areq_ctx->backup_iv;

        if (err)
                goto done;

        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
                if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
                           ctx->authsize) != 0) {
                        dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
                                ctx->authsize, ctx->cipher_mode);
                        /* In case of payload authentication failure, the
                         * decrypted message must not be revealed --> zero its
                         * memory.
                         */
                        sg_zero_buffer(areq->dst, sg_nents(areq->dst),
                                       areq->cryptlen, areq->assoclen);
                        err = -EBADMSG;
                }
        /* ENCRYPT */
        } else if (areq_ctx->is_icv_fragmented) {
                u32 skip = areq->cryptlen + areq_ctx->dst_offset;

                cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
                                   skip, (skip + ctx->authsize),
                                   CC_SG_FROM_BUF);
        }
done:
        aead_request_complete(areq, err);
}

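/*
 * Build the descriptor sequence that derives the three AES-XCBC-MAC
 * subkeys (RFC 3566): load the user key into the AES engine, then
 * produce K1, K2 and K3 by encrypting the constant blocks 0x01..01,
 * 0x02..02 and 0x03..03.
 */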
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        /* Load the AES key */
        hw_desc_init(&desc[0]);
        /* We use the same buffer for the source/user key as for the
         * output keys, because after this key-load the user key is not
         * needed anymore.
         */
        set_din_type(&desc[0], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
                     NS_BIT);
        set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
        set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
        set_key_size_aes(&desc[0], ctx->auth_keylen);
        set_flow_mode(&desc[0], S_DIN_to_AES);
        set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

        hw_desc_init(&desc[1]);
        set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[1], DIN_AES_DOUT);
        set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                      AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[2]);
        set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[2], DIN_AES_DOUT);
        set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                         + AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        hw_desc_init(&desc[3]);
        set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[3], DIN_AES_DOUT);
        set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
                                          + 2 * AES_KEYSIZE_128),
                              AES_KEYSIZE_128, NS_BIT, 0);

        return 4;
}

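/*
 * Build the descriptor sequence that precomputes the HMAC inner and
 * outer states: for each of the ipad/opad constants, XOR it into the
 * padded key block, hash that one block, and store the intermediate
 * digest into the ipad_opad buffer.
 */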
static unsigned int hmac_setkey(struct cc_hw_desc *desc,
                                struct cc_aead_ctx *ctx)
{
        unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
        unsigned int digest_ofs = 0;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                        CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

        unsigned int idx = 0;
        int i;

        /* calc derived HMAC key */
        for (i = 0; i < 2; i++) {
                /* Load hash initial state */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_sram(&desc[idx],
                             cc_larval_digest_addr(ctx->drvdata,
                                                   ctx->auth_mode),
                             digest_size);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                idx++;

                /* Load the hash current length */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_din_const(&desc[idx], 0, ctx->hash_len);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                idx++;

                /* Prepare ipad key */
                hw_desc_init(&desc[idx]);
                set_xor_val(&desc[idx], hmac_pad_const[i]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_flow_mode(&desc[idx], S_DIN_to_HASH);
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
                idx++;

                /* Perform HASH update */
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             hmac->padded_authkey_dma_addr,
                             SHA256_BLOCK_SIZE, NS_BIT);
                set_cipher_mode(&desc[idx], hash_mode);
                set_xor_active(&desc[idx]);
                set_flow_mode(&desc[idx], DIN_HASH);
                idx++;

                /* Get the digest */
                hw_desc_init(&desc[idx]);
                set_cipher_mode(&desc[idx], hash_mode);
                set_dout_dlli(&desc[idx],
                              (hmac->ipad_opad_dma_addr + digest_ofs),
                              digest_size, NS_BIT, 0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                idx++;

                digest_ofs += digest_size;
        }

        return idx;
}

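/*
 * Sanity-check the cipher and authentication key sizes against the
 * configured flow and auth modes.
 */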
static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
                ctx->enc_keylen, ctx->auth_keylen);

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                break;
        case DRV_HASH_XCBC_MAC:
                if (ctx->auth_keylen != AES_KEYSIZE_128 &&
                    ctx->auth_keylen != AES_KEYSIZE_192 &&
                    ctx->auth_keylen != AES_KEYSIZE_256)
                        return -ENOTSUPP;
                break;
        case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
                if (ctx->auth_keylen > 0)
                        return -EINVAL;
                break;
        default:
                dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
                return -EINVAL;
        }
        /* Check cipher key size */
        if (ctx->flow_mode == S_DIN_to_DES) {
                if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
                        dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        } else { /* Default assumed to be AES ciphers */
                if (ctx->enc_keylen != AES_KEYSIZE_128 &&
                    ctx->enc_keylen != AES_KEYSIZE_192 &&
                    ctx->enc_keylen != AES_KEYSIZE_256) {
                        dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
                                ctx->enc_keylen);
                        return -EINVAL;
                }
        }

        return 0; /* All tests of keys sizes passed */
}

/* This function prepares the user key so it can be passed to the HMAC
 * processing (copy to internal buffer, or hash it first if the key is
 * longer than a block).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
{
        dma_addr_t key_dma_addr = 0;
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        u32 larval_addr;
        struct cc_crypto_req cc_req = {};
        unsigned int blocksize;
        unsigned int digestsize;
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
        u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;

        switch (ctx->auth_mode) { /* auth_key required and >0 */
        case DRV_HASH_SHA1:
                blocksize = SHA1_BLOCK_SIZE;
                digestsize = SHA1_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA1;
                break;
        case DRV_HASH_SHA256:
        default:
                blocksize = SHA256_BLOCK_SIZE;
                digestsize = SHA256_DIGEST_SIZE;
                hashmode = DRV_HASH_HW_SHA256;
        }

        if (keylen != 0) {
                key = kmemdup(authkey, keylen, GFP_KERNEL);
                if (!key)
                        return -ENOMEM;

                key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
                        kfree_sensitive(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
                        /* Load hash initial state */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        larval_addr = cc_larval_digest_addr(ctx->drvdata,
                                                            ctx->auth_mode);
                        set_din_sram(&desc[idx], larval_addr, digestsize);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
                        idx++;

                        /* Load the hash current length */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_din_const(&desc[idx], 0, ctx->hash_len);
                        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
                        set_flow_mode(&desc[idx], S_DIN_to_HASH);
                        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI,
                                     key_dma_addr, keylen, NS_BIT);
                        set_flow_mode(&desc[idx], DIN_HASH);
                        idx++;

                        /* Get hashed key */
                        hw_desc_init(&desc[idx]);
                        set_cipher_mode(&desc[idx], hashmode);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      digestsize, NS_BIT, 0);
                        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                        set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        idx++;

                        hw_desc_init(&desc[idx]);
                        set_din_const(&desc[idx], 0, (blocksize - digestsize));
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
                                      digestsize), (blocksize - digestsize),
                                      NS_BIT, 0);
                        idx++;
                } else {
                        hw_desc_init(&desc[idx]);
                        set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
                                     keylen, NS_BIT);
                        set_flow_mode(&desc[idx], BYPASS);
                        set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                                      keylen, NS_BIT, 0);
                        idx++;

                        if ((blocksize - keylen) != 0) {
                                hw_desc_init(&desc[idx]);
                                set_din_const(&desc[idx], 0,
                                              (blocksize - keylen));
                                set_flow_mode(&desc[idx], BYPASS);
                                set_dout_dlli(&desc[idx],
                                              (padded_authkey_dma_addr +
                                               keylen),
                                              (blocksize - keylen), NS_BIT, 0);
                                idx++;
                        }
                }
        } else {
                hw_desc_init(&desc[idx]);
                set_din_const(&desc[idx], 0, (blocksize - keylen));
                set_flow_mode(&desc[idx], BYPASS);
                set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
                              blocksize, NS_BIT, 0);
                idx++;
        }

        rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
        if (rc)
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);

        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

        kfree_sensitive(key);

        return rc;
}

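/*
 * Common AEAD setkey: split an authenc() key blob into cipher and auth
 * keys (extracting the RFC 3686 nonce for CTR), copy the material into
 * the context buffers, and run the key-derivation sequence on the HW.
 */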
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_crypto_req cc_req = {};
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        unsigned int seq_len = 0;
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        const u8 *enckey, *authkey;
        int rc;

        dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
                ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

        /* STAT_PHASE_0: Init and sanity checks */

        if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
                struct crypto_authenc_keys keys;

                rc = crypto_authenc_extractkeys(&keys, key, keylen);
                if (rc)
                        return rc;
                enckey = keys.enckey;
                authkey = keys.authkey;
                ctx->enc_keylen = keys.enckeylen;
                ctx->auth_keylen = keys.authkeylen;

                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        /* The nonce is stored in the last bytes of the key */
                        if (ctx->enc_keylen <
                            (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
                                return -EINVAL;
                        /* Copy the nonce from the last 4 bytes of the CTR key
                         * to the first 4 bytes of the CTR IV
                         */
                        memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
                               CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
                        /* Set CTR key size */
                        ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
                }
        } else { /* non-authenc - has just one key */
                enckey = key;
                authkey = NULL;
                ctx->enc_keylen = keylen;
                ctx->auth_keylen = 0;
        }

        rc = validate_keys_sizes(ctx);
        if (rc)
                return rc;

        /* STAT_PHASE_1: Copy key to ctx */

        /* Get key material */
        memcpy(ctx->enckey, enckey, ctx->enc_keylen);
        if (ctx->enc_keylen == 24)
                memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
        if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
                       ctx->auth_keylen);
        } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
                rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
                if (rc)
                        return rc;
        }

        /* STAT_PHASE_2: Create sequence */

        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                seq_len = hmac_setkey(desc, ctx);
                break;
        case DRV_HASH_XCBC_MAC:
                seq_len = xcbc_setkey(desc, ctx);
                break;
        case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
                break; /* No auth. key setup */
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                return -ENOTSUPP;
        }

        /* STAT_PHASE_3: Submit sequence to HW */

        if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
                rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
                if (rc) {
                        dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                        return rc;
                }
        }

        /* Update STAT_PHASE_3 */
        return rc;
}

static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (unlikely(err))
                return err;

        err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
              cc_aead_setkey(aead, key, keylen);

        memzero_explicit(&keys, sizeof(keys));
        return err;
}

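/*
 * RFC 4309 CCM: the last 3 key bytes are the implicit nonce (salt);
 * strip them off before handing the remaining key to the common path.
 */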
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->ctr_nonce, key + keylen, 3);

        return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
                               unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        /* Unsupported auth. sizes */
        if (authsize == 0 ||
            authsize > crypto_aead_maxauthsize(authenc)) {
                return -ENOTSUPP;
        }

        ctx->authsize = authsize;
        dev_dbg(dev, "authlen=%d\n", ctx->authsize);

        return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

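/*
 * Queue a descriptor that feeds the associated data into the given
 * flow, using either a direct (DLLI) or a linked-table (MLLI) DMA
 * mapping as chosen by the buffer manager.
 */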
static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
                              struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
        unsigned int idx = *seq_size;
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (assoc_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "ASSOC buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
                             areq_ctx->assoclen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "ASSOC buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
                             areq_ctx->assoc.mlli_nents, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
                    areq_ctx->cryptlen > 0)
                        set_din_not_last_indication(&desc[idx]);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "Invalid ASSOC buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_authen_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size, int direct)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        unsigned int idx = *seq_size;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
        {
                struct scatterlist *cipher =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_sgl : areq_ctx->src_sgl;

                unsigned int offset =
                        (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                        areq_ctx->dst_offset : areq_ctx->src_offset;
                dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(cipher) + offset),
                             areq_ctx->cryptlen, NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_MLLI:
        {
                /* DOUBLE-PASS flow (the default):
                 * assoc. + iv + data are compacted into one table.
                 * If assoclen is zero, only the IV is processed.
                 */
                u32 mlli_addr = areq_ctx->assoc.sram_addr;
                u32 mlli_nents = areq_ctx->assoc.mlli_nents;

                if (areq_ctx->is_single_pass) {
                        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                                mlli_addr = areq_ctx->dst.sram_addr;
                                mlli_nents = areq_ctx->dst.mlli_nents;
                        } else {
                                mlli_addr = areq_ctx->src.sram_addr;
                                mlli_nents = areq_ctx->src.mlli_nents;
                        }
                }

                dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
                             NS_BIT);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        }
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
                                unsigned int flow_mode,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        unsigned int idx = *seq_size;
        struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
        enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
        struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if (areq_ctx->cryptlen == 0)
                return; /* null processing */

        switch (data_dma_type) {
        case CC_DMA_BUF_DLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_DLLI,
                             (sg_dma_address(areq_ctx->src_sgl) +
                              areq_ctx->src_offset), areq_ctx->cryptlen,
                              NS_BIT);
                set_dout_dlli(&desc[idx],
                              (sg_dma_address(areq_ctx->dst_sgl) +
                               areq_ctx->dst_offset),
                              areq_ctx->cryptlen, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_MLLI:
                dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
                hw_desc_init(&desc[idx]);
                set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
                             areq_ctx->src.mlli_nents, NS_BIT);
                set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
                              areq_ctx->dst.mlli_nents, NS_BIT, 0);
                set_flow_mode(&desc[idx], flow_mode);
                break;
        case CC_DMA_BUF_NULL:
        default:
                dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
        }

        *seq_size = (++idx);
}

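/*
 * Queue the descriptor that reads the final ICV out of the hash/MAC
 * engine: straight to the destination on encrypt, or into mac_buf on
 * decrypt for the comparison done in cc_aead_complete(). This is the
 * last descriptor, so it also sets the completion indication.
 */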
static void cc_proc_digest_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        int direct = req_ctx->gen_ctx.op_type;

        /* Get final ICV result */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                hw_desc_init(&desc[idx]);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
                              NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_aes_not_hash_mode(&desc[idx]);
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                } else {
                        set_cipher_config0(&desc[idx],
                                           HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        } else { /* Decrypt */
                /* Get ICV out from hardware */
                hw_desc_init(&desc[idx]);
                set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
                set_flow_mode(&desc[idx], S_HASH_to_DOUT);
                set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
                              ctx->authsize, NS_BIT, 1);
                set_queue_last_ind(ctx->drvdata, &desc[idx]);
                set_cipher_config0(&desc[idx],
                                   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
                set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
                if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
                        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
                        set_aes_not_hash_mode(&desc[idx]);
                } else {
                        set_cipher_mode(&desc[idx], hash_mode);
                }
        }

        *seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
                               struct cc_hw_desc desc[],
                               unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = req_ctx->hw_iv_size;
        unsigned int idx = *seq_size;
        int direct = req_ctx->gen_ctx.op_type;

        /* Setup cipher state */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
                     hw_iv_size, NS_BIT);
        if (ctx->cipher_mode == DRV_CIPHER_CTR)
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        else
                set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        /* Setup enc. key */
        hw_desc_init(&desc[idx]);
        set_cipher_config0(&desc[idx], direct);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_flow_mode(&desc[idx], ctx->flow_mode);
        if (ctx->flow_mode == S_DIN_to_AES) {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
                              ctx->enc_keylen), NS_BIT);
                set_key_size_aes(&desc[idx], ctx->enc_keylen);
        } else {
                set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
                             ctx->enc_keylen, NS_BIT);
                set_key_size_des(&desc[idx], ctx->enc_keylen);
        }
        set_cipher_mode(&desc[idx], ctx->cipher_mode);
        idx++;

        *seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
                           unsigned int *seq_size, unsigned int data_flow_mode)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int idx = *seq_size;

        if (req_ctx->cryptlen == 0)
                return; /* null processing */

        cc_set_cipher_desc(req, desc, &idx);
        cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* We must wait for DMA to write all cipher */
                hw_desc_init(&desc[idx]);
                set_din_no_dma(&desc[idx], 0, 0xfffff0);
                set_dout_no_dma(&desc[idx], 0, 0, 1);
                idx++;
        }

        *seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        /* Loading hash ipad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
                     NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        *seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
                             unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int idx = *seq_size;

        /* Loading MAC state */
        hw_desc_init(&desc[idx]);
        set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K1 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
                     AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K2 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        /* Setup XCBC MAC K3 */
        hw_desc_init(&desc[idx]);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
                      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
        set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
        set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_aes_not_hash_mode(&desc[idx]);
        idx++;

        *seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int idx = *seq_size;

        /* Hash associated data */
        if (areq_ctx->assoclen > 0)
                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

        /* Hash IV */
        *seq_size = idx;
}

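/*
 * Finalize the HMAC scheme: pad and write the inner hash state out to
 * the SRAM workspace, reload the opad state and initial digest length,
 * then hash the inner digest to produce the outer (final) digest.
 */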
static void cc_proc_scheme_desc(struct aead_request *req,
                                struct cc_hw_desc desc[],
                                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
        unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
        unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
                                CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
        unsigned int idx = *seq_size;

        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      ctx->hash_len);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
        set_cipher_do(&desc[idx], DO_PAD);
        idx++;

        /* Get final ICV result */
        hw_desc_init(&desc[idx]);
        set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
                      digest_size);
        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
        set_cipher_mode(&desc[idx], hash_mode);
        idx++;

        /* Loading hash opad xor key state */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_type(&desc[idx], DMA_DLLI,
                     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
                     digest_size, NS_BIT);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
        idx++;

        /* Load init. digest len (64 bytes) */
        hw_desc_init(&desc[idx]);
        set_cipher_mode(&desc[idx], hash_mode);
        set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
                     ctx->hash_len);
        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
        set_flow_mode(&desc[idx], S_DIN_to_HASH);
        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
        idx++;

        /* Perform HASH update */
        hw_desc_init(&desc[idx]);
        set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
                     digest_size);
        set_flow_mode(&desc[idx], DIN_HASH);
        idx++;

        *seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
                            struct cc_hw_desc desc[], unsigned int *seq_size)
{
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
            req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
            !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
                dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
                        ctx->drvdata->mlli_sram_addr,
                        req_ctx->mlli_params.mlli_len);
                /* Copy MLLI table host-to-sram */
                hw_desc_init(&desc[*seq_size]);
                set_din_type(&desc[*seq_size], DMA_DLLI,
                             req_ctx->mlli_params.mlli_dma_addr,
                             req_ctx->mlli_params.mlli_len, NS_BIT);
                set_dout_sram(&desc[*seq_size],
                              ctx->drvdata->mlli_sram_addr,
                              req_ctx->mlli_params.mlli_len);
                set_flow_mode(&desc[*seq_size], BYPASS);
                (*seq_size)++;
        }
}

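/*
 * Pick the data flow for the cipher pass: in single-pass mode the data
 * is routed through the cipher and hash engines at once; otherwise it
 * only passes through the cipher engine.
 */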
static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
                                          enum cc_flow_mode setup_flow_mode,
                                          bool is_single_pass)
{
        enum cc_flow_mode data_flow_mode;

        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_to_HASH_and_DOUT : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_to_HASH_and_DOUT : DIN_DES_DOUT;
        } else { /* Decrypt */
                if (setup_flow_mode == S_DIN_to_AES)
                        data_flow_mode = is_single_pass ?
                                AES_and_HASH : DIN_AES_DOUT;
                else
                        data_flow_mode = is_single_pass ?
                                DES_and_HASH : DIN_DES_DOUT;
        }

        return data_flow_mode;
}

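/*
 * Assemble the full descriptor sequence for an authenc(hmac(...),...)
 * request: a single pass (cipher and hash in parallel) when the data
 * layout allows it, or two passes otherwise.
 */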
static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
                            unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int data_flow_mode =
                cc_get_data_flow(direct, ctx->flow_mode,
                                 req_ctx->is_single_pass);

        if (req_ctx->is_single_pass) {
                /*
                 * Single-pass flow
                 */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_set_cipher_desc(req, desc, seq_size);
                cc_proc_header_desc(req, desc, seq_size);
                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);
                return;
        }

        /*
         * Double-pass flow
         * Fallback for unsupported single-pass modes,
         * i.e. when the assoc. data length is not a multiple of a word
         */
        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
                /* encrypt first.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* authenc after.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);

        } else { /* DECRYPT */
                /* authenc first.. */
                cc_set_hmac_desc(req, desc, seq_size);
                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
                cc_proc_scheme_desc(req, desc, seq_size);
                /* decrypt after.. */
                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
                /* reading the digest result with the completion bit set
                 * must come after the cipher operation
                 */
                cc_proc_digest_desc(req, desc, seq_size);
        }
}

static void
cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
                unsigned int *seq_size)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        int direct = req_ctx->gen_ctx.op_type;
        unsigned int data_flow_mode =
                cc_get_data_flow(direct, ctx->flow_mode,
                                 req_ctx->is_single_pass);

        if (req_ctx->is_single_pass) {
                /*
                 * Single-pass flow
                 */
                cc_set_xcbc_desc(req, desc, seq_size);
                cc_set_cipher_desc(req, desc, seq_size);
                cc_proc_header_desc(req, desc, seq_size);
                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
                cc_proc_digest_desc(req, desc, seq_size);
                return;
        }

1286        /*
1287         * Double-pass flow
1288         * Fallback for modes the single-pass flow does not support,
1289         * i.e. associated data whose length is not a word multiple.
1290         */
1291        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292                /* encrypt first.. */
1293                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1294                /* authenc after.. */
1295                cc_set_xcbc_desc(req, desc, seq_size);
1296                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1297                cc_proc_digest_desc(req, desc, seq_size);
1298        } else { /* DECRYPT */
1299                /* authenc first.. */
1300                cc_set_xcbc_desc(req, desc, seq_size);
1301                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1302                /* decrypt after..*/
1303                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1304                /* Read the digest result, setting the completion bit;
1305                 * this must come after the cipher operation.
1306                 */
1307                cc_proc_digest_desc(req, desc, seq_size);
1308        }
1309}
1310
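/* Check the request data lengths against the per-mode alignment rules and
 * select single-pass vs. double-pass processing accordingly.
 */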
1311static int validate_data_size(struct cc_aead_ctx *ctx,
1312                              enum drv_crypto_direction direct,
1313                              struct aead_request *req)
1314{
1315        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1316        struct device *dev = drvdata_to_dev(ctx->drvdata);
1317        unsigned int assoclen = areq_ctx->assoclen;
1318        unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1319                        (req->cryptlen - ctx->authsize) : req->cryptlen;
1320
1321        if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1322            req->cryptlen < ctx->authsize)
1323                goto data_size_err;
1324
1325        areq_ctx->is_single_pass = true; /* defaulted to fast flow */
1326
1327        switch (ctx->flow_mode) {
1328        case S_DIN_to_AES:
1329                if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1330                    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1331                        goto data_size_err;
1332                if (ctx->cipher_mode == DRV_CIPHER_CCM)
1333                        break;
1334                if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1335                        if (areq_ctx->plaintext_authenticate_only)
1336                                areq_ctx->is_single_pass = false;
1337                        break;
1338                }
1339
1340                if (!IS_ALIGNED(assoclen, sizeof(u32)))
1341                        areq_ctx->is_single_pass = false;
1342
1343                if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1344                    !IS_ALIGNED(cipherlen, sizeof(u32)))
1345                        areq_ctx->is_single_pass = false;
1346
1347                break;
1348        case S_DIN_to_DES:
1349                if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1350                        goto data_size_err;
1351                if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1352                        areq_ctx->is_single_pass = false;
1353                break;
1354        default:
1355                dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1356                goto data_size_err;
1357        }
1358
1359        return 0;
1360
1361data_size_err:
1362        return -EINVAL;
1363}
1364
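/* Encode the associated data length l(a) into the A0 header per RFC 3610:
 * lengths below 2^16 - 2^8 use a 2-byte big-endian encoding, while larger
 * values (up to 2^32 - 1) are prefixed with 0xff 0xfe and use a 4-byte
 * big-endian encoding.
 */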
1365static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1366{
1367        unsigned int len = 0;
1368
1369        if (header_size == 0)
1370                return 0;
1371
1372        if (header_size < ((1UL << 16) - (1UL << 8))) {
1373                len = 2;
1374
1375                pa0_buff[0] = (header_size >> 8) & 0xFF;
1376                pa0_buff[1] = header_size & 0xFF;
1377        } else {
1378                len = 6;
1379
1380                pa0_buff[0] = 0xFF;
1381                pa0_buff[1] = 0xFE;
1382                pa0_buff[2] = (header_size >> 24) & 0xFF;
1383                pa0_buff[3] = (header_size >> 16) & 0xFF;
1384                pa0_buff[4] = (header_size >> 8) & 0xFF;
1385                pa0_buff[5] = header_size & 0xFF;
1386        }
1387
1388        return len;
1389}
1390
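/* Write the message length l(m) into the last csize bytes of the B0 block
 * as a big-endian integer (taken from crypto/ccm.c). For example,
 * msglen = 0x0102 with csize = 3 yields the bytes 00 01 02.
 */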
1391static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1392{
1393        __be32 data;
1394
1395        memset(block, 0, csize);
1396        block += csize;
1397
1398        if (csize >= 4)
1399                csize = 4;
1400        else if (msglen > (1 << (8 * csize)))
1401                return -EOVERFLOW;
1402
1403        data = cpu_to_be32(msglen);
1404        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1405
1406        return 0;
1407}
1408
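/* Build the CCM descriptor sequence: load the AES-CTR and CBC-MAC keys and
 * states, hash B0 and the associated data, run the payload through both
 * engines, then encrypt the CBC-MAC result with the A0 keystream to form
 * the tag.
 */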
1409static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1410                  unsigned int *seq_size)
1411{
1412        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1413        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1414        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1415        unsigned int idx = *seq_size;
1416        unsigned int cipher_flow_mode;
1417        dma_addr_t mac_result;
1418
1419        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1420                cipher_flow_mode = AES_to_HASH_and_DOUT;
1421                mac_result = req_ctx->mac_buf_dma_addr;
1422        } else { /* Encrypt */
1423                cipher_flow_mode = AES_and_HASH;
1424                mac_result = req_ctx->icv_dma_addr;
1425        }
1426
1427        /* load key */
1428        hw_desc_init(&desc[idx]);
1429        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1430        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1431                     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1432                      ctx->enc_keylen), NS_BIT);
1433        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1434        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1435        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1436        set_flow_mode(&desc[idx], S_DIN_to_AES);
1437        idx++;
1438
1439        /* load ctr state */
1440        hw_desc_init(&desc[idx]);
1441        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1442        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1443        set_din_type(&desc[idx], DMA_DLLI,
1444                     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1445        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1447        set_flow_mode(&desc[idx], S_DIN_to_AES);
1448        idx++;
1449
1450        /* load MAC key */
1451        hw_desc_init(&desc[idx]);
1452        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1453        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1454                     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1455                      ctx->enc_keylen), NS_BIT);
1456        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1457        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1458        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1460        set_aes_not_hash_mode(&desc[idx]);
1461        idx++;
1462
1463        /* load MAC state */
1464        hw_desc_init(&desc[idx]);
1465        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1466        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1468                     AES_BLOCK_SIZE, NS_BIT);
1469        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1471        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1472        set_aes_not_hash_mode(&desc[idx]);
1473        idx++;
1474
1475        /* process assoc data */
1476        if (req_ctx->assoclen > 0) {
1477                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1478        } else {
1479                hw_desc_init(&desc[idx]);
1480                set_din_type(&desc[idx], DMA_DLLI,
1481                             sg_dma_address(&req_ctx->ccm_adata_sg),
1482                             AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1483                set_flow_mode(&desc[idx], DIN_HASH);
1484                idx++;
1485        }
1486
1487        /* process the cipher */
1488        if (req_ctx->cryptlen)
1489                cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1490
1491        /* Read temporal MAC */
1492        hw_desc_init(&desc[idx]);
1493        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1494        set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1495                      NS_BIT, 0);
1496        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1497        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1498        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1499        set_aes_not_hash_mode(&desc[idx]);
1500        idx++;
1501
1502        /* load AES-CTR state (A0, counter = 0) for the final MAC encryption */
1503        hw_desc_init(&desc[idx]);
1504        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1505        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1506        set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1507                     AES_BLOCK_SIZE, NS_BIT);
1508        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1509        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1510        set_flow_mode(&desc[idx], S_DIN_to_AES);
1511        idx++;
1512
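        /* Memory Barrier */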
1513        hw_desc_init(&desc[idx]);
1514        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1515        set_dout_no_dma(&desc[idx], 0, 0, 1);
1516        idx++;
1517
1518        /* encrypt the "T" value and store MAC in mac_state */
1519        hw_desc_init(&desc[idx]);
1520        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1521                     ctx->authsize, NS_BIT);
1522        set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1523        set_queue_last_ind(ctx->drvdata, &desc[idx]);
1524        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1525        idx++;
1526
1527        *seq_size = idx;
1528        return 0;
1529}
1530
1531static int config_ccm_adata(struct aead_request *req)
1532{
1533        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1534        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1535        struct device *dev = drvdata_to_dev(ctx->drvdata);
1536        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1538        unsigned int lp = req->iv[0];
1539        /* Note: The code assumes that req->iv[0] already contains the
1540         * value of L' (i.e. L - 1) of RFC 3610.
1541         */
1542        unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1543        unsigned int m = ctx->authsize;  /* This is M, the tag size, of RFC 3610. */
1544        u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1545        u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1546        u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1547        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1548                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1549                                req->cryptlen :
1550                                (req->cryptlen - ctx->authsize);
1551        int rc;
1552
1553        memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1554        memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1555
1556        /* taken from crypto/ccm.c */
1557        /* 2 <= L <= 8, so 1 <= L' <= 7. */
1558        if (l < 2 || l > 8) {
1559                dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
1560                return -EINVAL;
1561        }
1562        memcpy(b0, req->iv, AES_BLOCK_SIZE);
1563
1564        /* format control info per RFC 3610 and
1565         * NIST Special Publication 800-38C
1566         */
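        /* B0 flags octet: Reserved (1 bit) | Adata (1 bit) | M' (3 bits) |
         * L' (3 bits), with M' = (M - 2) / 2 and L' = L - 1 (RFC 3610).
         */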
1567        *b0 |= (8 * ((m - 2) / 2));
1568        if (req_ctx->assoclen > 0)
1569                *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1570
1571        rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
1572        if (rc) {
1573                dev_err(dev, "message len overflow detected\n");
1574                return rc;
1575        }
1576        /* END of "taken from crypto/ccm.c" */
1577
1578        /* l(a) - size of associated data. */
1579        req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1580
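        /* Build the initial CTR blocks: A1 (counter = 1) is the first block
         * used to encrypt the payload, while A0 (counter = 0) generates the
         * keystream that encrypts the CBC-MAC into the final tag.
         */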
1581        memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1582        req->iv[15] = 1;
1583
1584        memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1585        ctr_count_0[15] = 0;
1586
1587        return 0;
1588}
1589
1590static void cc_proc_rfc4309_ccm(struct aead_request *req)
1591{
1592        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1595
1596        /* L' */
1597        memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1598        /* For RFC 4309, always use 4 bytes for message length
1599         * (at most 2^32-1 bytes).
1600         */
1601        areq_ctx->ctr_iv[0] = 3;
1602
1603        /* In RFC 4309 there is an 11-byte nonce + IV part,
1604         * which we build here.
1605         */
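        /* Resulting counter block: flags (iv[0] = 3, i.e. L' = 3, so a
         * 4-byte length field) | salt (3 bytes) | IV (8 bytes) | counter.
         */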
1606        memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1607               CCM_BLOCK_NONCE_SIZE);
1608        memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1609               CCM_BLOCK_IV_SIZE);
1610        req->iv = areq_ctx->ctr_iv;
1611}
1612
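/* Prepare the hash engine for GHASH: derive the hash subkey H = E_K(0^128)
 * by encrypting one all-zero block, load it as the GHASH key and zero the
 * initial GHASH state.
 */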
1613static void cc_set_ghash_desc(struct aead_request *req,
1614                              struct cc_hw_desc desc[], unsigned int *seq_size)
1615{
1616        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1617        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1618        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1619        unsigned int idx = *seq_size;
1620
1621        /* load key to AES */
1622        hw_desc_init(&desc[idx]);
1623        set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1624        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1625        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1626                     ctx->enc_keylen, NS_BIT);
1627        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1628        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1629        set_flow_mode(&desc[idx], S_DIN_to_AES);
1630        idx++;
1631
1632        /* process one zero block to generate hkey */
1633        hw_desc_init(&desc[idx]);
1634        set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1635        set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1636                      NS_BIT, 0);
1637        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1638        idx++;
1639
1640        /* Memory Barrier */
1641        hw_desc_init(&desc[idx]);
1642        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1643        set_dout_no_dma(&desc[idx], 0, 0, 1);
1644        idx++;
1645
1646        /* Load GHASH subkey */
1647        hw_desc_init(&desc[idx]);
1648        set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1649                     AES_BLOCK_SIZE, NS_BIT);
1650        set_dout_no_dma(&desc[idx], 0, 0, 1);
1651        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1652        set_aes_not_hash_mode(&desc[idx]);
1653        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1654        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1655        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1656        idx++;
1657
1658        /* Configure the hash engine to work with GHASH.
1659         * Since it was not possible to extend the HASH submodes to add
1660         * GHASH, the following command is necessary in order to select
1661         * GHASH (according to the HW designers).
1662         */
1663        hw_desc_init(&desc[idx]);
1664        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1665        set_dout_no_dma(&desc[idx], 0, 0, 1);
1666        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1667        set_aes_not_hash_mode(&desc[idx]);
1668        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1669        set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1670        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1671        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1672        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1673        idx++;
1674
1675        /* Load the initial GHASH state, which is zero (as for any hash,
1676         * there is an initial state).
1677         */
1678        hw_desc_init(&desc[idx]);
1679        set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1680        set_dout_no_dma(&desc[idx], 0, 0, 1);
1681        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1682        set_aes_not_hash_mode(&desc[idx]);
1683        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1684        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1685        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1686        idx++;
1687
1688        *seq_size = idx;
1689}
1690
1691static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1692                             unsigned int *seq_size)
1693{
1694        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1695        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1696        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1697        unsigned int idx = *seq_size;
1698
1699        /* load key to AES */
1700        hw_desc_init(&desc[idx]);
1701        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1702        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1703        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1704                     ctx->enc_keylen, NS_BIT);
1705        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1706        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1707        set_flow_mode(&desc[idx], S_DIN_to_AES);
1708        idx++;
1709
1710        if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1711                /* load the initial AES-CTR counter value, incremented by 2 */
1712                hw_desc_init(&desc[idx]);
1713                set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714                set_key_size_aes(&desc[idx], ctx->enc_keylen);
1715                set_din_type(&desc[idx], DMA_DLLI,
1716                             req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1717                             NS_BIT);
1718                set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1719                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1720                set_flow_mode(&desc[idx], S_DIN_to_AES);
1721                idx++;
1722        }
1723
1724        *seq_size = idx;
1725}
1726
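/* Finalize GCM: GHASH the lengths block (len(A) || len(C)), then encrypt
 * the resulting GHASH state with the J0 counter block (GCTR, counter = 1)
 * to produce the authentication tag.
 */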
1727static void cc_proc_gcm_result(struct aead_request *req,
1728                               struct cc_hw_desc desc[],
1729                               unsigned int *seq_size)
1730{
1731        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1732        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1733        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1734        dma_addr_t mac_result;
1735        unsigned int idx = *seq_size;
1736
1737        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1738                mac_result = req_ctx->mac_buf_dma_addr;
1739        } else { /* Encrypt */
1740                mac_result = req_ctx->icv_dma_addr;
1741        }
1742
1743        /* process(ghash) gcm_block_len */
1744        hw_desc_init(&desc[idx]);
1745        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1746                     AES_BLOCK_SIZE, NS_BIT);
1747        set_flow_mode(&desc[idx], DIN_HASH);
1748        idx++;
1749
1750        /* Store GHASH state after GHASH(associated data + ciphertext + len block) */
1751        hw_desc_init(&desc[idx]);
1752        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1753        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1754        set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1755                      NS_BIT, 0);
1756        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1757        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1758        set_aes_not_hash_mode(&desc[idx]);
1759
1761
1762        /* load the initial AES-CTR counter value, incremented by 1 */
1763        hw_desc_init(&desc[idx]);
1764        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1765        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1766        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1767                     AES_BLOCK_SIZE, NS_BIT);
1768        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1769        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1770        set_flow_mode(&desc[idx], S_DIN_to_AES);
1771        idx++;
1772
1773        /* Memory Barrier */
1774        hw_desc_init(&desc[idx]);
1775        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1776        set_dout_no_dma(&desc[idx], 0, 0, 1);
1777        idx++;
1778
1779        /* process GCTR on stored GHASH and store MAC in mac_state */
1780        hw_desc_init(&desc[idx]);
1781        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1782        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1783                     AES_BLOCK_SIZE, NS_BIT);
1784        set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1785        set_queue_last_ind(ctx->drvdata, &desc[idx]);
1786        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1787        idx++;
1788
1789        *seq_size = idx;
1790}
1791
1792static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1793                  unsigned int *seq_size)
1794{
1795        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1796        unsigned int cipher_flow_mode;
1797
1798        /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
1799        if (req_ctx->plaintext_authenticate_only) {
1800                cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1801                cc_set_ghash_desc(req, desc, seq_size);
1802                /* process(ghash) assoc data */
1803                cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1804                cc_set_gctr_desc(req, desc, seq_size);
1805                cc_proc_gcm_result(req, desc, seq_size);
1806                return 0;
1807        }
1808
1809        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810                cipher_flow_mode = AES_and_HASH;
1811        } else { /* Encrypt */
1812                cipher_flow_mode = AES_to_HASH_and_DOUT;
1813        }
1814
1815        /* for GCM and RFC 4106 */
1816        cc_set_ghash_desc(req, desc, seq_size);
1817        /* process(ghash) assoc data */
1818        if (req_ctx->assoclen > 0)
1819                cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1820        cc_set_gctr_desc(req, desc, seq_size);
1821        /* process(gctr+ghash) */
1822        if (req_ctx->cryptlen)
1823                cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1824        cc_proc_gcm_result(req, desc, seq_size);
1825
1826        return 0;
1827}
1828
1829static int config_gcm_context(struct aead_request *req)
1830{
1831        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1832        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1833        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1834        struct device *dev = drvdata_to_dev(ctx->drvdata);
1835
1836        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1837                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1838                                req->cryptlen :
1839                                (req->cryptlen - ctx->authsize);
1840        __be32 counter = cpu_to_be32(2);
1841
1842        dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1843                __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1844
1845        memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1846
1847        memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1848
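        /* Per the GCM spec, the pre-counter block J0 is IV || 0^31 || 1.
         * Counter value 2 (J0 incremented once) encrypts the payload, while
         * counter value 1 (J0 itself) encrypts the final GHASH into the tag.
         */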
1849        memcpy(req->iv + 12, &counter, 4);
1850        memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1851
1852        counter = cpu_to_be32(1);
1853        memcpy(req->iv + 12, &counter, 4);
1854        memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1855
1856        if (!req_ctx->plaintext_authenticate_only) {
1857                __be64 temp64;
1858
1859                temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1860                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1861                temp64 = cpu_to_be64(cryptlen * 8);
1862                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1863        } else {
1864                /* RFC 4543 => all data (AAD, IV, plaintext) is treated
1865                 * as additional authenticated data; nothing is encrypted.
1866                 */
1867                __be64 temp64;
1868
1869                temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
1870                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871                temp64 = 0;
1872                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1873        }
1874
1875        return 0;
1876}
1877
1878static void cc_proc_rfc4_gcm(struct aead_request *req)
1879{
1880        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1883
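        /* Build the 12-byte GCM IV used by RFC 4106/4543:
         * salt (4 bytes, taken from the key) || explicit IV (8 bytes).
         */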
1884        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885               ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887               GCM_BLOCK_RFC4_IV_SIZE);
1888        req->iv = areq_ctx->ctr_iv;
1889}
1890
1891static int cc_proc_aead(struct aead_request *req,
1892                        enum drv_crypto_direction direct)
1893{
1894        int rc = 0;
1895        int seq_len = 0;
1896        struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1897        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1898        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1899        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1900        struct device *dev = drvdata_to_dev(ctx->drvdata);
1901        struct cc_crypto_req cc_req = {};
1902
1903        dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1904                ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1905                ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1906                sg_virt(req->dst), req->dst->offset, req->cryptlen);
1907
1908        /* STAT_PHASE_0: Init and sanity checks */
1909
1910        /* Check data length according to mode */
1911        if (validate_data_size(ctx, direct, req)) {
1912                dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1913                        req->cryptlen, areq_ctx->assoclen);
1914                return -EINVAL;
1915        }
1916
1917        /* Setup request structure */
1918        cc_req.user_cb = cc_aead_complete;
1919        cc_req.user_arg = req;
1920
1921        /* Setup request context */
1922        areq_ctx->gen_ctx.op_type = direct;
1923        areq_ctx->req_authsize = ctx->authsize;
1924        areq_ctx->cipher_mode = ctx->cipher_mode;
1925
1926        /* STAT_PHASE_1: Map buffers */
1927
1928        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1929                /* Build CTR IV - Copy nonce from last 4 bytes in
1930                 * CTR key to first 4 bytes in CTR IV
1931                 */
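                /* Full counter block, per RFC 3686: nonce (4 bytes) ||
                 * IV (8 bytes) || counter (4 bytes, big-endian, starting
                 * at 1).
                 */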
1932                memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1933                       CTR_RFC3686_NONCE_SIZE);
1934                memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1935                       CTR_RFC3686_IV_SIZE);
1936                /* Initialize counter portion of counter block */
1937                *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1938                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1939
1940                /* Replace with counter iv */
1941                req->iv = areq_ctx->ctr_iv;
1942                areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1943        } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1944                   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1945                areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1946                if (areq_ctx->ctr_iv != req->iv) {
1947                        memcpy(areq_ctx->ctr_iv, req->iv,
1948                               crypto_aead_ivsize(tfm));
1949                        req->iv = areq_ctx->ctr_iv;
1950                }
1951        } else {
1952                areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1953        }
1954
1955        if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1956                rc = config_ccm_adata(req);
1957                if (rc) {
1958                        dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1959                                rc);
1960                        goto exit;
1961                }
1962        } else {
1963                areq_ctx->ccm_hdr_size = ccm_header_size_null;
1964        }
1965
1966        if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1967                rc = config_gcm_context(req);
1968                if (rc) {
1969                        dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1970                                rc);
1971                        goto exit;
1972                }
1973        }
1974
1975        rc = cc_map_aead_request(ctx->drvdata, req);
1976        if (rc) {
1977                dev_err(dev, "map_request() failed\n");
1978                goto exit;
1979        }
1980
1981        /* STAT_PHASE_2: Create sequence */
1982
1983        /* Load MLLI tables to SRAM if necessary */
1984        cc_mlli_to_sram(req, desc, &seq_len);
1985
1986        switch (ctx->auth_mode) {
1987        case DRV_HASH_SHA1:
1988        case DRV_HASH_SHA256:
1989                cc_hmac_authenc(req, desc, &seq_len);
1990                break;
1991        case DRV_HASH_XCBC_MAC:
1992                cc_xcbc_authenc(req, desc, &seq_len);
1993                break;
1994        case DRV_HASH_NULL:
1995                if (ctx->cipher_mode == DRV_CIPHER_CCM)
1996                        cc_ccm(req, desc, &seq_len);
1997                if (ctx->cipher_mode == DRV_CIPHER_GCTR)
1998                        cc_gcm(req, desc, &seq_len);
1999                break;
2000        default:
2001                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2002                cc_unmap_aead_request(dev, req);
2003                rc = -ENOTSUPP;
2004                goto exit;
2005        }
2006
2007        /* STAT_PHASE_3: Lock HW and push sequence */
2008
2009        rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2010
2011        if (rc != -EINPROGRESS && rc != -EBUSY) {
2012                dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2013                cc_unmap_aead_request(dev, req);
2014        }
2015
2016exit:
2017        return rc;
2018}
2019
2020static int cc_aead_encrypt(struct aead_request *req)
2021{
2022        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2023        int rc;
2024
2025        memset(areq_ctx, 0, sizeof(*areq_ctx));
2026
2027        /* No generated IV required */
2028        areq_ctx->backup_iv = req->iv;
2029        areq_ctx->assoclen = req->assoclen;
2030
2031        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2032        if (rc != -EINPROGRESS && rc != -EBUSY)
2033                req->iv = areq_ctx->backup_iv;
2034
2035        return rc;
2036}
2037
2038static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2039{
2040        /* Very similar to cc_aead_encrypt() above. */
2041
2042        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2043        int rc;
2044
2045        rc = crypto_ipsec_check_assoclen(req->assoclen);
2046        if (rc)
2047                goto out;
2048
2049        memset(areq_ctx, 0, sizeof(*areq_ctx));
2050
2051        /* No generated IV required */
2052        areq_ctx->backup_iv = req->iv;
2053        areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2054
2055        cc_proc_rfc4309_ccm(req);
2056
2057        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2058        if (rc != -EINPROGRESS && rc != -EBUSY)
2059                req->iv = areq_ctx->backup_iv;
2060out:
2061        return rc;
2062}
2063
2064static int cc_aead_decrypt(struct aead_request *req)
2065{
2066        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2067        int rc;
2068
2069        memset(areq_ctx, 0, sizeof(*areq_ctx));
2070
2071        /* No generated IV required */
2072        areq_ctx->backup_iv = req->iv;
2073        areq_ctx->assoclen = req->assoclen;
2074
2075        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2076        if (rc != -EINPROGRESS && rc != -EBUSY)
2077                req->iv = areq_ctx->backup_iv;
2078
2079        return rc;
2080}
2081
2082static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2083{
2084        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2085        int rc;
2086
2087        rc = crypto_ipsec_check_assoclen(req->assoclen);
2088        if (rc)
2089                goto out;
2090
2091        memset(areq_ctx, 0, sizeof(*areq_ctx));
2092
2093        /* No generated IV required */
2094        areq_ctx->backup_iv = req->iv;
2095        areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2096
2097        cc_proc_rfc4309_ccm(req);
2098
2099        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2100        if (rc != -EINPROGRESS && rc != -EBUSY)
2101                req->iv = areq_ctx->backup_iv;
2102
2103out:
2104        return rc;
2105}
2106
2107static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2108                                 unsigned int keylen)
2109{
2110        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2111        struct device *dev = drvdata_to_dev(ctx->drvdata);
2112
2113        dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2114
2115        if (keylen < 4)
2116                return -EINVAL;
2117
2118        keylen -= 4;
2119        memcpy(ctx->ctr_nonce, key + keylen, 4);
2120
2121        return cc_aead_setkey(tfm, key, keylen);
2122}
2123
2124static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2125                                 unsigned int keylen)
2126{
2127        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2128        struct device *dev = drvdata_to_dev(ctx->drvdata);
2129
2130        dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2131
2132        if (keylen < 4)
2133                return -EINVAL;
2134
2135        keylen -= 4;
2136        memcpy(ctx->ctr_nonce, key + keylen, 4);
2137
2138        return cc_aead_setkey(tfm, key, keylen);
2139}
2140
2141static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2142                              unsigned int authsize)
2143{
2144        switch (authsize) {
2145        case 4:
2146        case 8:
2147        case 12:
2148        case 13:
2149        case 14:
2150        case 15:
2151        case 16:
2152                break;
2153        default:
2154                return -EINVAL;
2155        }
2156
2157        return cc_aead_setauthsize(authenc, authsize);
2158}
2159
2160static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2161                                      unsigned int authsize)
2162{
2163        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2164        struct device *dev = drvdata_to_dev(ctx->drvdata);
2165
2166        dev_dbg(dev, "authsize %d\n", authsize);
2167
2168        switch (authsize) {
2169        case 8:
2170        case 12:
2171        case 16:
2172                break;
2173        default:
2174                return -EINVAL;
2175        }
2176
2177        return cc_aead_setauthsize(authenc, authsize);
2178}
2179
2180static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2181                                      unsigned int authsize)
2182{
2183        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2184        struct device *dev = drvdata_to_dev(ctx->drvdata);
2185
2186        dev_dbg(dev, "authsize %d\n", authsize);
2187
2188        if (authsize != 16)
2189                return -EINVAL;
2190
2191        return cc_aead_setauthsize(authenc, authsize);
2192}
2193
2194static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2195{
2196        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2197        int rc;
2198
2199        rc = crypto_ipsec_check_assoclen(req->assoclen);
2200        if (rc)
2201                goto out;
2202
2203        memset(areq_ctx, 0, sizeof(*areq_ctx));
2204
2205        /* No generated IV required */
2206        areq_ctx->backup_iv = req->iv;
2207        areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2208
2209        cc_proc_rfc4_gcm(req);
2210
2211        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2212        if (rc != -EINPROGRESS && rc != -EBUSY)
2213                req->iv = areq_ctx->backup_iv;
2214out:
2215        return rc;
2216}
2217
2218static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2219{
2220        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2221        int rc;
2222
2223        rc = crypto_ipsec_check_assoclen(req->assoclen);
2224        if (rc)
2225                goto out;
2226
2227        memset(areq_ctx, 0, sizeof(*areq_ctx));
2228
2229        /* the plaintext is not encrypted with RFC 4543 */
2230        areq_ctx->plaintext_authenticate_only = true;
2231
2232        /* No generated IV required */
2233        areq_ctx->backup_iv = req->iv;
2234        areq_ctx->assoclen = req->assoclen;
2235
2236        cc_proc_rfc4_gcm(req);
2237
2238        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2239        if (rc != -EINPROGRESS && rc != -EBUSY)
2240                req->iv = areq_ctx->backup_iv;
2241out:
2242        return rc;
2243}
2244
2245static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2246{
2247        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2248        int rc;
2249
2250        rc = crypto_ipsec_check_assoclen(req->assoclen);
2251        if (rc)
2252                goto out;
2253
2254        memset(areq_ctx, 0, sizeof(*areq_ctx));
2255
2256        /* No generated IV required */
2257        areq_ctx->backup_iv = req->iv;
2258        areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2259
2260        cc_proc_rfc4_gcm(req);
2261
2262        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2263        if (rc != -EINPROGRESS && rc != -EBUSY)
2264                req->iv = areq_ctx->backup_iv;
2265out:
2266        return rc;
2267}
2268
2269static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2270{
2271        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2272        int rc;
2273
2274        rc = crypto_ipsec_check_assoclen(req->assoclen);
2275        if (rc)
2276                goto out;
2277
2278        memset(areq_ctx, 0, sizeof(*areq_ctx));
2279
2280        /* the payload is not decrypted with RFC 4543; it is only authenticated */
2281        areq_ctx->plaintext_authenticate_only = true;
2282
2283        /* No generated IV required */
2284        areq_ctx->backup_iv = req->iv;
2285        areq_ctx->assoclen = req->assoclen;
2286
2287        cc_proc_rfc4_gcm(req);
2288
2289        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2290        if (rc != -EINPROGRESS && rc != -EBUSY)
2291                req->iv = areq_ctx->backup_iv;
2292out:
2293        return rc;
2294}
2295
2296/* aead alg */
2297static struct cc_alg_template aead_algs[] = {
2298        {
2299                .name = "authenc(hmac(sha1),cbc(aes))",
2300                .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2301                .blocksize = AES_BLOCK_SIZE,
2302                .template_aead = {
2303                        .setkey = cc_aead_setkey,
2304                        .setauthsize = cc_aead_setauthsize,
2305                        .encrypt = cc_aead_encrypt,
2306                        .decrypt = cc_aead_decrypt,
2307                        .init = cc_aead_init,
2308                        .exit = cc_aead_exit,
2309                        .ivsize = AES_BLOCK_SIZE,
2310                        .maxauthsize = SHA1_DIGEST_SIZE,
2311                },
2312                .cipher_mode = DRV_CIPHER_CBC,
2313                .flow_mode = S_DIN_to_AES,
2314                .auth_mode = DRV_HASH_SHA1,
2315                .min_hw_rev = CC_HW_REV_630,
2316                .std_body = CC_STD_NIST,
2317        },
2318        {
2319                .name = "authenc(hmac(sha1),cbc(des3_ede))",
2320                .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2321                .blocksize = DES3_EDE_BLOCK_SIZE,
2322                .template_aead = {
2323                        .setkey = cc_des3_aead_setkey,
2324                        .setauthsize = cc_aead_setauthsize,
2325                        .encrypt = cc_aead_encrypt,
2326                        .decrypt = cc_aead_decrypt,
2327                        .init = cc_aead_init,
2328                        .exit = cc_aead_exit,
2329                        .ivsize = DES3_EDE_BLOCK_SIZE,
2330                        .maxauthsize = SHA1_DIGEST_SIZE,
2331                },
2332                .cipher_mode = DRV_CIPHER_CBC,
2333                .flow_mode = S_DIN_to_DES,
2334                .auth_mode = DRV_HASH_SHA1,
2335                .min_hw_rev = CC_HW_REV_630,
2336                .std_body = CC_STD_NIST,
2337        },
2338        {
2339                .name = "authenc(hmac(sha256),cbc(aes))",
2340                .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2341                .blocksize = AES_BLOCK_SIZE,
2342                .template_aead = {
2343                        .setkey = cc_aead_setkey,
2344                        .setauthsize = cc_aead_setauthsize,
2345                        .encrypt = cc_aead_encrypt,
2346                        .decrypt = cc_aead_decrypt,
2347                        .init = cc_aead_init,
2348                        .exit = cc_aead_exit,
2349                        .ivsize = AES_BLOCK_SIZE,
2350                        .maxauthsize = SHA256_DIGEST_SIZE,
2351                },
2352                .cipher_mode = DRV_CIPHER_CBC,
2353                .flow_mode = S_DIN_to_AES,
2354                .auth_mode = DRV_HASH_SHA256,
2355                .min_hw_rev = CC_HW_REV_630,
2356                .std_body = CC_STD_NIST,
2357        },
2358        {
2359                .name = "authenc(hmac(sha256),cbc(des3_ede))",
2360                .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2361                .blocksize = DES3_EDE_BLOCK_SIZE,
2362                .template_aead = {
2363                        .setkey = cc_des3_aead_setkey,
2364                        .setauthsize = cc_aead_setauthsize,
2365                        .encrypt = cc_aead_encrypt,
2366                        .decrypt = cc_aead_decrypt,
2367                        .init = cc_aead_init,
2368                        .exit = cc_aead_exit,
2369                        .ivsize = DES3_EDE_BLOCK_SIZE,
2370                        .maxauthsize = SHA256_DIGEST_SIZE,
2371                },
2372                .cipher_mode = DRV_CIPHER_CBC,
2373                .flow_mode = S_DIN_to_DES,
2374                .auth_mode = DRV_HASH_SHA256,
2375                .min_hw_rev = CC_HW_REV_630,
2376                .std_body = CC_STD_NIST,
2377        },
2378        {
2379                .name = "authenc(xcbc(aes),cbc(aes))",
2380                .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2381                .blocksize = AES_BLOCK_SIZE,
2382                .template_aead = {
2383                        .setkey = cc_aead_setkey,
2384                        .setauthsize = cc_aead_setauthsize,
2385                        .encrypt = cc_aead_encrypt,
2386                        .decrypt = cc_aead_decrypt,
2387                        .init = cc_aead_init,
2388                        .exit = cc_aead_exit,
2389                        .ivsize = AES_BLOCK_SIZE,
2390                        .maxauthsize = AES_BLOCK_SIZE,
2391                },
2392                .cipher_mode = DRV_CIPHER_CBC,
2393                .flow_mode = S_DIN_to_AES,
2394                .auth_mode = DRV_HASH_XCBC_MAC,
2395                .min_hw_rev = CC_HW_REV_630,
2396                .std_body = CC_STD_NIST,
2397        },
2398        {
2399                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2400                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2401                .blocksize = 1,
2402                .template_aead = {
2403                        .setkey = cc_aead_setkey,
2404                        .setauthsize = cc_aead_setauthsize,
2405                        .encrypt = cc_aead_encrypt,
2406                        .decrypt = cc_aead_decrypt,
2407                        .init = cc_aead_init,
2408                        .exit = cc_aead_exit,
2409                        .ivsize = CTR_RFC3686_IV_SIZE,
2410                        .maxauthsize = SHA1_DIGEST_SIZE,
2411                },
2412                .cipher_mode = DRV_CIPHER_CTR,
2413                .flow_mode = S_DIN_to_AES,
2414                .auth_mode = DRV_HASH_SHA1,
2415                .min_hw_rev = CC_HW_REV_630,
2416                .std_body = CC_STD_NIST,
2417        },
2418        {
2419                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2420                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2421                .blocksize = 1,
2422                .template_aead = {
2423                        .setkey = cc_aead_setkey,
2424                        .setauthsize = cc_aead_setauthsize,
2425                        .encrypt = cc_aead_encrypt,
2426                        .decrypt = cc_aead_decrypt,
2427                        .init = cc_aead_init,
2428                        .exit = cc_aead_exit,
2429                        .ivsize = CTR_RFC3686_IV_SIZE,
2430                        .maxauthsize = SHA256_DIGEST_SIZE,
2431                },
2432                .cipher_mode = DRV_CIPHER_CTR,
2433                .flow_mode = S_DIN_to_AES,
2434                .auth_mode = DRV_HASH_SHA256,
2435                .min_hw_rev = CC_HW_REV_630,
2436                .std_body = CC_STD_NIST,
2437        },
2438        {
2439                .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2440                .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2441                .blocksize = 1,
2442                .template_aead = {
2443                        .setkey = cc_aead_setkey,
2444                        .setauthsize = cc_aead_setauthsize,
2445                        .encrypt = cc_aead_encrypt,
2446                        .decrypt = cc_aead_decrypt,
2447                        .init = cc_aead_init,
2448                        .exit = cc_aead_exit,
2449                        .ivsize = CTR_RFC3686_IV_SIZE,
2450                        .maxauthsize = AES_BLOCK_SIZE,
2451                },
2452                .cipher_mode = DRV_CIPHER_CTR,
2453                .flow_mode = S_DIN_to_AES,
2454                .auth_mode = DRV_HASH_XCBC_MAC,
2455                .min_hw_rev = CC_HW_REV_630,
2456                .std_body = CC_STD_NIST,
2457        },
2458        {
2459                .name = "ccm(aes)",
2460                .driver_name = "ccm-aes-ccree",
2461                .blocksize = 1,
2462                .template_aead = {
2463                        .setkey = cc_aead_setkey,
2464                        .setauthsize = cc_ccm_setauthsize,
2465                        .encrypt = cc_aead_encrypt,
2466                        .decrypt = cc_aead_decrypt,
2467                        .init = cc_aead_init,
2468                        .exit = cc_aead_exit,
2469                        .ivsize = AES_BLOCK_SIZE,
2470                        .maxauthsize = AES_BLOCK_SIZE,
2471                },
2472                .cipher_mode = DRV_CIPHER_CCM,
2473                .flow_mode = S_DIN_to_AES,
2474                .auth_mode = DRV_HASH_NULL,
2475                .min_hw_rev = CC_HW_REV_630,
2476                .std_body = CC_STD_NIST,
2477        },
2478        {
2479                .name = "rfc4309(ccm(aes))",
2480                .driver_name = "rfc4309-ccm-aes-ccree",
2481                .blocksize = 1,
2482                .template_aead = {
2483                        .setkey = cc_rfc4309_ccm_setkey,
2484                        .setauthsize = cc_rfc4309_ccm_setauthsize,
2485                        .encrypt = cc_rfc4309_ccm_encrypt,
2486                        .decrypt = cc_rfc4309_ccm_decrypt,
2487                        .init = cc_aead_init,
2488                        .exit = cc_aead_exit,
2489                        .ivsize = CCM_BLOCK_IV_SIZE,
2490                        .maxauthsize = AES_BLOCK_SIZE,
2491                },
2492                .cipher_mode = DRV_CIPHER_CCM,
2493                .flow_mode = S_DIN_to_AES,
2494                .auth_mode = DRV_HASH_NULL,
2495                .min_hw_rev = CC_HW_REV_630,
2496                .std_body = CC_STD_NIST,
2497        },
2498        {
2499                .name = "gcm(aes)",
2500                .driver_name = "gcm-aes-ccree",
2501                .blocksize = 1,
2502                .template_aead = {
2503                        .setkey = cc_aead_setkey,
2504                        .setauthsize = cc_gcm_setauthsize,
2505                        .encrypt = cc_aead_encrypt,
2506                        .decrypt = cc_aead_decrypt,
2507                        .init = cc_aead_init,
2508                        .exit = cc_aead_exit,
2509                        .ivsize = 12,
2510                        .maxauthsize = AES_BLOCK_SIZE,
2511                },
2512                .cipher_mode = DRV_CIPHER_GCTR,
2513                .flow_mode = S_DIN_to_AES,
2514                .auth_mode = DRV_HASH_NULL,
2515                .min_hw_rev = CC_HW_REV_630,
2516                .std_body = CC_STD_NIST,
2517        },
2518        {
2519                .name = "rfc4106(gcm(aes))",
2520                .driver_name = "rfc4106-gcm-aes-ccree",
2521                .blocksize = 1,
2522                .template_aead = {
2523                        .setkey = cc_rfc4106_gcm_setkey,
2524                        .setauthsize = cc_rfc4106_gcm_setauthsize,
2525                        .encrypt = cc_rfc4106_gcm_encrypt,
2526                        .decrypt = cc_rfc4106_gcm_decrypt,
2527                        .init = cc_aead_init,
2528                        .exit = cc_aead_exit,
2529                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2530                        .maxauthsize = AES_BLOCK_SIZE,
2531                },
2532                .cipher_mode = DRV_CIPHER_GCTR,
2533                .flow_mode = S_DIN_to_AES,
2534                .auth_mode = DRV_HASH_NULL,
2535                .min_hw_rev = CC_HW_REV_630,
2536                .std_body = CC_STD_NIST,
2537        },
2538        {
2539                .name = "rfc4543(gcm(aes))",
2540                .driver_name = "rfc4543-gcm-aes-ccree",
2541                .blocksize = 1,
2542                .template_aead = {
2543                        .setkey = cc_rfc4543_gcm_setkey,
2544                        .setauthsize = cc_rfc4543_gcm_setauthsize,
2545                        .encrypt = cc_rfc4543_gcm_encrypt,
2546                        .decrypt = cc_rfc4543_gcm_decrypt,
2547                        .init = cc_aead_init,
2548                        .exit = cc_aead_exit,
2549                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2550                        .maxauthsize = AES_BLOCK_SIZE,
2551                },
2552                .cipher_mode = DRV_CIPHER_GCTR,
2553                .flow_mode = S_DIN_to_AES,
2554                .auth_mode = DRV_HASH_NULL,
2555                .min_hw_rev = CC_HW_REV_630,
2556                .std_body = CC_STD_NIST,
2557        },
2558};
2559
2560static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2561                                                struct device *dev)
2562{
2563        struct cc_crypto_alg *t_alg;
2564        struct aead_alg *alg;
2565
2566        t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
2567        if (!t_alg)
2568                return ERR_PTR(-ENOMEM);
2569
2570        alg = &tmpl->template_aead;
2571
2572        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2573        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2574                 tmpl->driver_name);
2575        alg->base.cra_module = THIS_MODULE;
2576        alg->base.cra_priority = CC_CRA_PRIO;
2577
2578        alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2579        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2580        alg->base.cra_blocksize = tmpl->blocksize;
2581        alg->init = cc_aead_init;
2582        alg->exit = cc_aead_exit;
2583
2584        t_alg->aead_alg = *alg;
2585
2586        t_alg->cipher_mode = tmpl->cipher_mode;
2587        t_alg->flow_mode = tmpl->flow_mode;
2588        t_alg->auth_mode = tmpl->auth_mode;
2589
2590        return t_alg;
2591}
2592
2593int cc_aead_free(struct cc_drvdata *drvdata)
2594{
2595        struct cc_crypto_alg *t_alg, *n;
2596        struct cc_aead_handle *aead_handle = drvdata->aead_handle;
2597
2598        /* Remove registered algs */
2599        list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2600                crypto_unregister_aead(&t_alg->aead_alg);
2601                list_del(&t_alg->entry);
2602        }
2603
2604        return 0;
2605}
2606
2607int cc_aead_alloc(struct cc_drvdata *drvdata)
2608{
2609        struct cc_aead_handle *aead_handle;
2610        struct cc_crypto_alg *t_alg;
2611        int rc = -ENOMEM;
2612        int alg;
2613        struct device *dev = drvdata_to_dev(drvdata);
2614
2615        aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
2616        if (!aead_handle) {
2617                rc = -ENOMEM;
2618                goto fail0;
2619        }
2620
2621        INIT_LIST_HEAD(&aead_handle->aead_list);
2622        drvdata->aead_handle = aead_handle;
2623
2624        aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2625                                                         MAX_HMAC_DIGEST_SIZE);
2626
2627        if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2628                rc = -ENOMEM;
2629                goto fail1;
2630        }
2631
2632        /* Linux crypto */
2633        for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2634                if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2635                    !(drvdata->std_bodies & aead_algs[alg].std_body))
2636                        continue;
2637
2638                t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2639                if (IS_ERR(t_alg)) {
2640                        rc = PTR_ERR(t_alg);
2641                        dev_err(dev, "%s alg allocation failed\n",
2642                                aead_algs[alg].driver_name);
2643                        goto fail1;
2644                }
2645                t_alg->drvdata = drvdata;
2646                rc = crypto_register_aead(&t_alg->aead_alg);
2647                if (rc) {
2648                        dev_err(dev, "%s alg registration failed\n",
2649                                t_alg->aead_alg.base.cra_driver_name);
2650                        goto fail1;
2651                }
2652
2653                list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2654                dev_dbg(dev, "Registered %s\n",
2655                        t_alg->aead_alg.base.cra_driver_name);
2656        }
2657
2658        return 0;
2659
2660fail1:
2661        cc_aead_free(drvdata);
2662fail0:
2663        return rc;
2664}
2665