linux/drivers/crypto/ccree/cc_aead.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead	template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
	cc_sram_addr_t sram_workspace_addr;
	struct list_head aead_list;
};

struct cc_hmac_s {
	u8 *padded_authkey;
	u8 *ipad_opad; /* IPAD, OPAD */
	dma_addr_t padded_authkey_dma_addr;
	dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
	u8 *xcbc_keys; /* K1, K2, K3 */
	dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
	struct cc_drvdata *drvdata;
	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
	u8 *enckey;
	dma_addr_t enckey_dma_addr;
	union {
		struct cc_hmac_s hmac;
		struct cc_xcbc_s xcbc;
	} auth_state;
	unsigned int enc_keylen;
	unsigned int auth_keylen;
	unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
	enum drv_cipher_mode cipher_mode;
	enum cc_flow_mode flow_mode;
	enum drv_hash_mode auth_mode;
};

static inline bool valid_assoclen(struct aead_request *req)
{
	return ((req->assoclen == 16) || (req->assoclen == 20));
}
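
/*
 * Note on valid_assoclen() above (an editorial gloss, not in the original
 * source): the 16- and 20-byte values presumably correspond to the AAD
 * sizes produced by the IPsec ESP wrappers of these algorithms, e.g.
 * rfc4309(ccm(aes)): 8-byte IV plus SPI + 32-bit sequence number gives
 * 16 bytes, while SPI + 64-bit extended sequence number gives 20 bytes.
 */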

static void cc_aead_exit(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
		crypto_tfm_alg_name(&tfm->base));

	/* Unmap enckey buffer */
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
				  ctx->enckey_dma_addr);
		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
			&ctx->enckey_dma_addr);
		ctx->enckey_dma_addr = 0;
		ctx->enckey = NULL;
	}

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

		if (xcbc->xcbc_keys) {
			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
					  xcbc->xcbc_keys,
					  xcbc->xcbc_keys_dma_addr);
		}
		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
			&xcbc->xcbc_keys_dma_addr);
		xcbc->xcbc_keys_dma_addr = 0;
		xcbc->xcbc_keys = NULL;
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

		if (hmac->ipad_opad) {
			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
					  hmac->ipad_opad,
					  hmac->ipad_opad_dma_addr);
			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
				&hmac->ipad_opad_dma_addr);
			hmac->ipad_opad_dma_addr = 0;
			hmac->ipad_opad = NULL;
		}
		if (hmac->padded_authkey) {
			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
					  hmac->padded_authkey,
					  hmac->padded_authkey_dma_addr);
			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
				&hmac->padded_authkey_dma_addr);
			hmac->padded_authkey_dma_addr = 0;
			hmac->padded_authkey = NULL;
		}
	}
}

static int cc_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
		crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = cc_alg->cipher_mode;
	ctx->flow_mode = cc_alg->flow_mode;
	ctx->auth_mode = cc_alg->auth_mode;
	ctx->drvdata = cc_alg->drvdata;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
					 &ctx->enckey_dma_addr, GFP_KERNEL);
	if (!ctx->enckey) {
		dev_err(dev, "Failed allocating key buffer\n");
		goto init_failed;
	}
	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
		ctx->enckey);

	/* Set default authlen value */

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
		/* (also used as temporary storage for the user key - up to 256 bit) */
		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
						     &xcbc->xcbc_keys_dma_addr,
						     GFP_KERNEL);
		if (!xcbc->xcbc_keys) {
			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

		/* Allocate dma-coherent buffer for IPAD + OPAD */
		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
						     &hmac->ipad_opad_dma_addr,
						     GFP_KERNEL);

		if (!hmac->ipad_opad) {
			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}

		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
			hmac->ipad_opad);

		hmac->padded_authkey = dma_alloc_coherent(dev,
							  MAX_HMAC_BLOCK_SIZE,
							  pkey_dma,
							  GFP_KERNEL);

		if (!hmac->padded_authkey) {
			dev_err(dev, "failed to allocate padded_authkey\n");
			goto init_failed;
		}
	} else {
		ctx->auth_state.hmac.ipad_opad = NULL;
		ctx->auth_state.hmac.padded_authkey = NULL;
	}

	return 0;

init_failed:
	cc_aead_exit(tfm);
	return -ENOMEM;
}
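
/*
 * Context lifecycle sketch (editorial, hypothetical usage): cc_aead_init()
 * and cc_aead_exit() run when a transform is allocated/freed through the
 * generic kernel AEAD API. A minimal kernel-side consumer of one of the
 * algorithms this driver registers might look like:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);	// reaches cc_aead_setkey()
 *	crypto_aead_setauthsize(tfm, 16);	// reaches cc_aead_setauthsize()
 *	...					// submit aead_request(s)
 *	crypto_free_aead(tfm);			// triggers cc_aead_exit()
 */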

static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
	struct aead_request *areq = (struct aead_request *)cc_req;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cc_unmap_aead_request(dev, areq);

	/* Restore ordinary iv pointer */
	areq->iv = areq_ctx->backup_iv;

	if (err)
		goto done;

	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
			   ctx->authsize) != 0) {
			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
				ctx->authsize, ctx->cipher_mode);
			/* On payload authentication failure the decrypted
			 * message MUST NOT be revealed --> zero its memory.
			 */
			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
			err = -EBADMSG;
		}
	} else { /*ENCRYPT*/
		if (areq_ctx->is_icv_fragmented) {
			u32 skip = areq->cryptlen + areq_ctx->dst_offset;

			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
					   areq_ctx->dst_sgl, skip,
					   (skip + ctx->authsize),
					   CC_SG_FROM_BUF);
		}

		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}
done:
	aead_request_complete(areq, err);
}

static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
				struct cc_aead_ctx *ctx)
{
	/* Load the AES key */
	hw_desc_init(&desc[0]);
	/* We use the same buffer for the source/user key and for the
	 * output keys, because once this key-load completes the user
	 * key is not needed anymore.
	 */
	set_din_type(&desc[0], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
		     NS_BIT);
	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[0], ctx->auth_keylen);
	set_flow_mode(&desc[0], S_DIN_to_AES);
	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

	hw_desc_init(&desc[1]);
	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[1], DIN_AES_DOUT);
	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[2]);
	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[2], DIN_AES_DOUT);
	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
					 + AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[3]);
	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[3], DIN_AES_DOUT);
	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
					  + 2 * AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	return 4;
}
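
/*
 * xcbc_setkey() above implements the AES-XCBC-MAC subkey derivation of
 * RFC 3566: the three subkeys are obtained by encrypting fixed constants
 * with the user key K. In pseudo-C (sketch only):
 *
 *	K1 = AES-ECB(K, 0x01 repeated 16 times);
 *	K2 = AES-ECB(K, 0x02 repeated 16 times);
 *	K3 = AES-ECB(K, 0x03 repeated 16 times);
 *
 * which is why descriptors 1-3 feed the constants 0x01010101, 0x02020202
 * and 0x03030303 through the AES engine whose key was loaded by
 * descriptor 0.
 */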

static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	unsigned int digest_ofs = 0;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

	unsigned int idx = 0;
	int i;

	/* calc derived HMAC key */
	for (i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_sram(&desc[idx],
			     cc_larval_digest_addr(ctx->drvdata,
						   ctx->auth_mode),
			     digest_size);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     hmac->padded_authkey_dma_addr,
			     SHA256_BLOCK_SIZE, NS_BIT);
		set_cipher_mode(&desc[idx], hash_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the digest */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_dout_dlli(&desc[idx],
			      (hmac->ipad_opad_dma_addr + digest_ofs),
			      digest_size, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		idx++;

		digest_ofs += digest_size;
	}

	return idx;
}
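
/*
 * hmac_setkey() above precomputes the two HMAC intermediate digests,
 * following the RFC 2104 definition
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * For each of the two loop iterations it seeds the hash engine with the
 * initial (larval) digest, XORs the padded key with the ipad or opad
 * byte constant, runs one hash block, and stores the resulting internal
 * state at ipad_opad_dma_addr. Later requests load these saved states
 * instead of re-hashing the key every time.
 */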

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
		ctx->enc_keylen, ctx->auth_keylen);

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		break;
	case DRV_HASH_XCBC_MAC:
		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
		    ctx->auth_keylen != AES_KEYSIZE_192 &&
		    ctx->auth_keylen != AES_KEYSIZE_256)
			return -ENOTSUPP;
		break;
	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
		if (ctx->auth_keylen > 0)
			return -EINVAL;
		break;
	default:
		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
		return -EINVAL;
	}
	/* Check cipher key size */
	if (ctx->flow_mode == S_DIN_to_DES) {
		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	} else { /* Default assumed to be AES ciphers */
		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
		    ctx->enc_keylen != AES_KEYSIZE_192 &&
		    ctx->enc_keylen != AES_KEYSIZE_256) {
			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	}

	return 0; /* All key-size checks passed */
}

/* This function prepares the user key for HMAC processing: it is copied
 * to an internal buffer, or hashed first if the key is longer than the
 * hash block size.
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
	struct cc_crypto_req cc_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
	unsigned int idx = 0;
	int rc = 0;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	dma_addr_t padded_authkey_dma_addr =
		ctx->auth_state.hmac.padded_authkey_dma_addr;

	switch (ctx->auth_mode) { /* auth_key required and >0 */
	case DRV_HASH_SHA1:
		blocksize = SHA1_BLOCK_SIZE;
		digestsize = SHA1_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA1;
		break;
	case DRV_HASH_SHA256:
	default:
		blocksize = SHA256_BLOCK_SIZE;
		digestsize = SHA256_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA256;
	}

	if (keylen != 0) {
		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_sram(&desc[idx], larval_addr, digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     key_dma_addr, keylen, NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
				      digestsize), (blocksize - digestsize),
				      NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
				     keylen, NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (padded_authkey_dma_addr +
					       keylen),
					      (blocksize - keylen), NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, (blocksize - keylen));
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc)
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);

	if (key_dma_addr)
		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

	return rc;
}
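
/*
 * The branches in cc_get_plain_hmac_key() above mirror the RFC 2104
 * definition of the padded key K' (sketch, in pseudo-C):
 *
 *	if (keylen > blocksize)
 *		K' = H(key) || zeros;	// hash, then zero-pad to blocksize
 *	else
 *		K' = key || zeros;	// just zero-pad to blocksize
 *
 * Either way the result occupies exactly one hash block in
 * padded_authkey, ready for the ipad/opad XOR in hmac_setkey().
 */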

static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct cc_crypto_req cc_req = {};
	struct crypto_authenc_key_param *param;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	int rc = -EINVAL;
	unsigned int seq_len = 0;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		if (!RTA_OK(rta, keylen))
			goto badkey;
		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
			goto badkey;
		if (RTA_PAYLOAD(rta) < sizeof(*param))
			goto badkey;
		param = RTA_DATA(rta);
		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
		key += RTA_ALIGN(rta->rta_len);
		keylen -= RTA_ALIGN(rta->rta_len);
		if (keylen < ctx->enc_keylen)
			goto badkey;
		ctx->auth_keylen = keylen - ctx->enc_keylen;

		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			/* the nonce is stored in the last bytes of the key */
			if (ctx->enc_keylen <
			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
				goto badkey;
			/* Copy nonce from last 4 bytes in CTR key to
			 * first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
			       CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
	} else { /* non-authenc - has just one key */
		ctx->enc_keylen = keylen;
		ctx->auth_keylen = 0;
	}

	rc = validate_keys_sizes(ctx);
	if (rc)
		goto badkey;

	/* STAT_PHASE_1: Copy key to ctx */

	/* Get key material */
	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
	if (ctx->enc_keylen == 24)
		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
		if (rc)
			goto badkey;
	}

	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		seq_len = hmac_setkey(desc, ctx);
		break;
	case DRV_HASH_XCBC_MAC:
		seq_len = xcbc_setkey(desc, ctx);
		break;
	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
		break; /* No auth. key setup */
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		rc = -ENOTSUPP;
		goto badkey;
	}

	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
		if (rc) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
		}
	}

	/* Update STAT_PHASE_3 */
	return rc;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
	return rc;
}
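
/*
 * The key blob parsed at the top of cc_aead_setkey() uses the generic
 * authenc() layout from <crypto/authenc.h>: an rtattr header carrying a
 * struct crypto_authenc_key_param (the big-endian enckeylen), followed
 * by the authentication key and then the encryption key:
 *
 *	+---------------+------------------+----------+---------+
 *	| rtattr header | param->enckeylen | auth key | enc key |
 *	+---------------+------------------+----------+---------+
 *	<- RTA_ALIGN(rta->rta_len) ------->
 *
 * so auth_keylen = keylen - enc_keylen once the header is skipped.
 */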

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 3)
		return -EINVAL;

	keylen -= 3;
	memcpy(ctx->ctr_nonce, key + keylen, 3);

	return cc_aead_setkey(tfm, key, keylen);
}
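
/*
 * Per RFC 4309, the rfc4309(ccm(aes)) key material is the AES key with a
 * 3-byte salt appended (AES_CCM_RFC4309_NONCE_SIZE). The salt stripped
 * off and stashed in ctx->ctr_nonce here later forms the leading bytes
 * of the CCM nonce, with the 8-byte per-request IV supplying the rest.
 */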

static int cc_aead_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	/* Unsupported auth. sizes */
	if (authsize == 0 ||
	    authsize > crypto_aead_maxauthsize(authenc)) {
		return -ENOTSUPP;
	}

	ctx->authsize = authsize;
	dev_dbg(dev, "authlen=%d\n", ctx->authsize);

	return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (assoc_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "ASSOC buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
			     areq->assoclen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "ASSOC buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
			     areq_ctx->assoc.mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "Invalid ASSOC buffer type\n");
	}

	*seq_size = (++idx);
}
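
/*
 * Buffer-type note for the descriptor helpers around here (editorial
 * gloss): CC_DMA_BUF_DLLI ("direct" LLI) is used when the data is
 * DMA-contiguous and can be described by a single address/length pair,
 * while CC_DMA_BUF_MLLI ("multi" LLI) points the engine at a link-list
 * table previously copied into SRAM (see cc_mlli_to_sram() further down)
 * to describe scattered data.
 */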

static void cc_proc_authen_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size, int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
	{
		struct scatterlist *cipher =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_sgl : areq_ctx->src_sgl;

		unsigned int offset =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_offset : areq_ctx->src_offset;
		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(cipher) + offset),
			     areq_ctx->cryptlen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_MLLI:
	{
		/* DOUBLE-PASS flow (the default): assoc. data + IV + data
		 * are compacted into one MLLI table; if assoclen is zero,
		 * only the IV is processed.
		 */
		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		if (areq_ctx->is_single_pass) {
			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
				mlli_addr = areq_ctx->dst.sram_addr;
				mlli_nents = areq_ctx->dst.mlli_nents;
			} else {
				mlli_addr = areq_ctx->src.sram_addr;
				mlli_nents = areq_ctx->src.mlli_nents;
			}
		}

		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
			     NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->cryptlen == 0)
		return; /* null processing */

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen,
			      NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
			      areq_ctx->cryptlen, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
			     areq_ctx->src.mlli_nents, NS_BIT);
		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	int direct = req_ctx->gen_ctx.op_type;

	/* Get final ICV result */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		hw_desc_init(&desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
			      NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_aes_not_hash_mode(&desc[idx]);
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		} else {
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			set_cipher_mode(&desc[idx], hash_mode);
		}
	} else { /*Decrypt*/
		/* Get ICV out from hardware */
		hw_desc_init(&desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
			      ctx->authsize, NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		set_cipher_config0(&desc[idx],
				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
			set_aes_not_hash_mode(&desc[idx]);
		} else {
			set_cipher_mode(&desc[idx], hash_mode);
		}
	}

	*seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = req_ctx->hw_iv_size;
	unsigned int idx = *seq_size;
	int direct = req_ctx->gen_ctx.op_type;

	/* Setup cipher state */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     hw_iv_size, NS_BIT);
	if (ctx->cipher_mode == DRV_CIPHER_CTR)
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	else
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	/* Setup enc. key */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	if (ctx->flow_mode == S_DIN_to_AES) {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
			      ctx->enc_keylen), NS_BIT);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
	} else {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ctx->enc_keylen, NS_BIT);
		set_key_size_des(&desc[idx], ctx->enc_keylen);
	}
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	*seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
			   unsigned int *seq_size, unsigned int data_flow_mode)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int idx = *seq_size;

	if (req_ctx->cryptlen == 0)
		return; /* null processing */

	cc_set_cipher_desc(req, desc, &idx);
	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* We must wait for DMA to write out all the cipher data */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	*seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Loading hash ipad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
		     NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->drvdata->hash_len_sz);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	*seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int idx = *seq_size;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		     AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	*seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;

	/* Hash associated data */
	if (req->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

	/* Hash IV */
	*seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      ctx->drvdata->hash_len_sz);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_cipher_do(&desc[idx], DO_PAD);
	idx++;

	/* Get final ICV result */
	hw_desc_init(&desc[idx]);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      digest_size);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_cipher_mode(&desc[idx], hash_mode);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
		     digest_size, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->drvdata->hash_len_sz);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
		     digest_size);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	*seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
			    struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
	    !req_ctx->is_single_pass) {
		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
			(unsigned int)ctx->drvdata->mlli_sram_addr,
			req_ctx->mlli_params.mlli_len);
		/* Copy MLLI table host-to-sram */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;
	}
}

static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
					  enum cc_flow_mode setup_flow_mode,
					  bool is_single_pass)
{
	enum cc_flow_mode data_flow_mode;

	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
	} else { /* Decrypt */
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_and_HASH : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_and_HASH : DIN_DES_DOUT;
	}

	return data_flow_mode;
}
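
/*
 * cc_get_data_flow() above reduces to this mapping (AES case shown;
 * DES is analogous):
 *
 *	direction  single-pass  data flow
 *	encrypt    yes          AES_to_HASH_and_DOUT (cipher + auth in one pass)
 *	encrypt    no           DIN_AES_DOUT         (cipher only; auth runs separately)
 *	decrypt    yes          AES_and_HASH         (auth ciphertext while decrypting)
 *	decrypt    no           DIN_AES_DOUT         (cipher only; auth runs separately)
 */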

static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
			    unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/*
		 * Single-pass flow
		 */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/*
	 * Double-pass flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. when the assoc. data length is not a multiple of a word
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);

	} else { /*DECRYPT*/
		/* authenc first.. */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		/* decrypt after.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* Reading the digest result (with the completion bit set)
		 * must come after the cipher operation.
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}
1250
1251static void
1252cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1253                unsigned int *seq_size)
1254{
1255        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1256        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1257        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1258        int direct = req_ctx->gen_ctx.op_type;
1259        unsigned int data_flow_mode =
1260                cc_get_data_flow(direct, ctx->flow_mode,
1261                                 req_ctx->is_single_pass);
1262
1263        if (req_ctx->is_single_pass) {
1264                /*
1265                 * Single-pass flow
1266                 */
1267                cc_set_xcbc_desc(req, desc, seq_size);
1268                cc_set_cipher_desc(req, desc, seq_size);
1269                cc_proc_header_desc(req, desc, seq_size);
1270                cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1271                cc_proc_digest_desc(req, desc, seq_size);
1272                return;
1273        }
1274
1275        /*
1276         * Double-pass flow.
1277         * Fallback for modes that cannot use the single-pass flow,
1278         * i.e. associated data whose length is not a multiple of a word.
1279         */
1280        if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1281                /* encrypt first.. */
1282                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1283                /* authenc after.. */
1284                cc_set_xcbc_desc(req, desc, seq_size);
1285                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1286                cc_proc_digest_desc(req, desc, seq_size);
1287        } else { /*DECRYPT*/
1288                /* authenc first.. */
1289                cc_set_xcbc_desc(req, desc, seq_size);
1290                cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1291                /* decrypt after..*/
1292                cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1293                /* Read the digest result with the completion bit set;
1294                 * this must come after the cipher operation.
1295                 */
1296                cc_proc_digest_desc(req, desc, seq_size);
1297        }
1298}
1299
1300static int validate_data_size(struct cc_aead_ctx *ctx,
1301                              enum drv_crypto_direction direct,
1302                              struct aead_request *req)
1303{
1304        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1305        struct device *dev = drvdata_to_dev(ctx->drvdata);
1306        unsigned int assoclen = req->assoclen;
1307        unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1308                        (req->cryptlen - ctx->authsize) : req->cryptlen;
1309
1310        if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1311            req->cryptlen < ctx->authsize)
1312                goto data_size_err;
1313
1314        areq_ctx->is_single_pass = true; /* default to the fast (single-pass) flow */
1315
1316        switch (ctx->flow_mode) {
1317        case S_DIN_to_AES:
1318                if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1319                    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1320                        goto data_size_err;
1321                if (ctx->cipher_mode == DRV_CIPHER_CCM)
1322                        break;
1323                if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1324                        if (areq_ctx->plaintext_authenticate_only)
1325                                areq_ctx->is_single_pass = false;
1326                        break;
1327                }
1328
1329                if (!IS_ALIGNED(assoclen, sizeof(u32)))
1330                        areq_ctx->is_single_pass = false;
1331
1332                if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1333                    !IS_ALIGNED(cipherlen, sizeof(u32)))
1334                        areq_ctx->is_single_pass = false;
1335
1336                break;
1337        case S_DIN_to_DES:
1338                if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1339                        goto data_size_err;
1340                if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1341                        areq_ctx->is_single_pass = false;
1342                break;
1343        default:
1344                dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1345                goto data_size_err;
1346        }
1347
1348        return 0;
1349
1350data_size_err:
1351        return -EINVAL;
1352}
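
/*
 * Worked example (illustrative): for authenc(hmac(sha1),cbc(aes))
 * decryption with cryptlen = 96 and authsize = 16, cipherlen is 80,
 * which is AES-block aligned, so the size check passes; an assoclen of
 * 15 is not a multiple of a 32-bit word, so is_single_pass is cleared
 * and the double-pass flow is used instead.
 */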
1353
1354static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1355{
1356        unsigned int len = 0;
1357
1358        if (header_size == 0)
1359                return 0;
1360
1361        if (header_size < ((1UL << 16) - (1UL << 8))) {
1362                len = 2;
1363
1364                pa0_buff[0] = (header_size >> 8) & 0xFF;
1365                pa0_buff[1] = header_size & 0xFF;
1366        } else {
1367                len = 6;
1368
1369                pa0_buff[0] = 0xFF;
1370                pa0_buff[1] = 0xFE;
1371                pa0_buff[2] = (header_size >> 24) & 0xFF;
1372                pa0_buff[3] = (header_size >> 16) & 0xFF;
1373                pa0_buff[4] = (header_size >> 8) & 0xFF;
1374                pa0_buff[5] = header_size & 0xFF;
1375        }
1376
1377        return len;
1378}
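
/*
 * Worked example (illustrative): per RFC 3610 / NIST SP 800-38C, an
 * associated data length below 0xff00 is encoded in two octets and
 * larger values in six octets prefixed with 0xff 0xfe:
 *
 *	u8 a0[6];
 *
 *	format_ccm_a0(a0, 24);      // returns 2, a0 = { 0x00, 0x18 }
 *	format_ccm_a0(a0, 0x10000); // returns 6,
 *	                            // a0 = { 0xff, 0xfe, 0x00, 0x01, 0x00, 0x00 }
 */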
1379
1380static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1381{
1382        __be32 data;
1383
1384        memset(block, 0, csize);
1385        block += csize;
1386
1387        if (csize >= 4)
1388                csize = 4;
1389        else if (msglen > (1 << (8 * csize)))
1390                return -EOVERFLOW;
1391
1392        data = cpu_to_be32(msglen);
1393        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1394
1395        return 0;
1396}
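
/*
 * Worked example (illustrative): with csize = 4 (i.e. L = 4, as used
 * by RFC 4309) and msglen = 80, the last four octets of the block
 * receive the big-endian encoding { 0x00, 0x00, 0x00, 0x50 }; with
 * csize = 2, any msglen above 2^16 returns -EOVERFLOW.
 */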
1397
1398static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1399                  unsigned int *seq_size)
1400{
1401        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1402        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1403        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1404        unsigned int idx = *seq_size;
1405        unsigned int cipher_flow_mode;
1406        dma_addr_t mac_result;
1407
1408        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1409                cipher_flow_mode = AES_to_HASH_and_DOUT;
1410                mac_result = req_ctx->mac_buf_dma_addr;
1411        } else { /* Encrypt */
1412                cipher_flow_mode = AES_and_HASH;
1413                mac_result = req_ctx->icv_dma_addr;
1414        }
1415
1416        /* load key */
1417        hw_desc_init(&desc[idx]);
1418        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1419        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1420                     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1421                      ctx->enc_keylen), NS_BIT);
1422        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1423        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1424        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1425        set_flow_mode(&desc[idx], S_DIN_to_AES);
1426        idx++;
1427
1428        /* load ctr state */
1429        hw_desc_init(&desc[idx]);
1430        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1431        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1432        set_din_type(&desc[idx], DMA_DLLI,
1433                     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1434        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1435        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1436        set_flow_mode(&desc[idx], S_DIN_to_AES);
1437        idx++;
1438
1439        /* load MAC key */
1440        hw_desc_init(&desc[idx]);
1441        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1442        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1443                     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1444                      ctx->enc_keylen), NS_BIT);
1445        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1446        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1447        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1448        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1449        set_aes_not_hash_mode(&desc[idx]);
1450        idx++;
1451
1452        /* load MAC state */
1453        hw_desc_init(&desc[idx]);
1454        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1455        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1456        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1457                     AES_BLOCK_SIZE, NS_BIT);
1458        set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1460        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1461        set_aes_not_hash_mode(&desc[idx]);
1462        idx++;
1463
1464        /* process assoc data */
1465        if (req->assoclen > 0) {
1466                cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1467        } else {
1468                hw_desc_init(&desc[idx]);
1469                set_din_type(&desc[idx], DMA_DLLI,
1470                             sg_dma_address(&req_ctx->ccm_adata_sg),
1471                             AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1472                set_flow_mode(&desc[idx], DIN_HASH);
1473                idx++;
1474        }
1475
1476        /* process the cipher */
1477        if (req_ctx->cryptlen)
1478                cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1479
1480        /* Read temporal MAC */
1481        hw_desc_init(&desc[idx]);
1482        set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1483        set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1484                      NS_BIT, 0);
1485        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1486        set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1487        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1488        set_aes_not_hash_mode(&desc[idx]);
1489        idx++;
1490
1491        /* load AES-CTR state (for last MAC calculation)*/
1492        hw_desc_init(&desc[idx]);
1493        set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1494        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1495        set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1496                     AES_BLOCK_SIZE, NS_BIT);
1497        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1498        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1499        set_flow_mode(&desc[idx], S_DIN_to_AES);
1500        idx++;
1501
1502        hw_desc_init(&desc[idx]);
1503        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1504        set_dout_no_dma(&desc[idx], 0, 0, 1);
1505        idx++;
1506
1507        /* encrypt the "T" value and store MAC in mac_state */
1508        hw_desc_init(&desc[idx]);
1509        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1510                     ctx->authsize, NS_BIT);
1511        set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1512        set_queue_last_ind(ctx->drvdata, &desc[idx]);
1513        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1514        idx++;
1515
1516        *seq_size = idx;
1517        return 0;
1518}
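
/*
 * Note (illustrative): the sequence above is the CCM composition of
 * RFC 3610: CBC-MAC over B0, the encoded associated data and the
 * payload, with the resulting tag encrypted under AES-CTR using the
 * A0 counter block (ccm_iv0) to produce the transmitted MAC.
 */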
1519
1520static int config_ccm_adata(struct aead_request *req)
1521{
1522        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1523        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1524        struct device *dev = drvdata_to_dev(ctx->drvdata);
1525        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1527        unsigned int lp = req->iv[0];
1528        /* Note: the code assumes that req->iv[0] already contains the
1529         * value of L' of RFC 3610.
1530         */
1531        unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1532        unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1533        u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1534        u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1535        u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1536        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1537                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1538                                req->cryptlen :
1539                                (req->cryptlen - ctx->authsize);
1540        int rc;
1541
1542        memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1543        memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1544
1545        /* taken from crypto/ccm.c */
1546        /* 2 <= L <= 8, so 1 <= L' <= 7. */
1547        if (l < 2 || l > 8) {
1548                dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1549                return -EINVAL;
1550        }
1551        memcpy(b0, req->iv, AES_BLOCK_SIZE);
1552
1553        /* format control info per RFC 3610 and
1554         * NIST Special Publication 800-38C
1555         */
1556        *b0 |= (8 * ((m - 2) / 2));
1557        if (req->assoclen > 0)
1558                *b0 |= 64;  /* Enable bit 6 if Adata exists. */
1559
1560        rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m). */
1561        if (rc) {
1562                dev_err(dev, "message len overflow detected\n");
1563                return rc;
1564        }
1565        /* END of "taken from crypto/ccm.c" */
1566
1567        /* l(a) - size of associated data. */
1568        req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1569
1570        memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1571        req->iv[15] = 1;
1572
1573        memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1574        ctr_count_0[15] = 0;
1575
1576        return 0;
1577}
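
/*
 * Worked example (illustrative): for authsize M = 8, a 4-byte length
 * field (req->iv[0] = L' = 3) and Adata present, the B0 flags octet
 * becomes 0x40 | (((8 - 2) / 2) << 3) | 3 = 0x5b, followed by the
 * 11-byte nonce and the 4-byte message length, per RFC 3610.
 */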
1578
1579static void cc_proc_rfc4309_ccm(struct aead_request *req)
1580{
1581        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1582        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1583        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1584
1585        /* L' */
1586        memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1587        /* For RFC 4309, always use 4 bytes for message length
1588         * (at most 2^32-1 bytes).
1589         */
1590        areq_ctx->ctr_iv[0] = 3;
1591
1592        /* In RFC 4309 there is an 11-byte nonce+IV part
1593         * that we build here.
1594         */
1595        memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1596               CCM_BLOCK_NONCE_SIZE);
1597        memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1598               CCM_BLOCK_IV_SIZE);
1599        req->iv = areq_ctx->ctr_iv;
1600        req->assoclen -= CCM_BLOCK_IV_SIZE;
1601}
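
/*
 * Resulting counter-block layout (illustrative, offsets per cc_aead.h):
 *
 *	byte  0      : L' = 3 (4-byte length/counter field)
 *	bytes 1..3   : salt from ctx->ctr_nonce (end of the key material)
 *	bytes 4..11  : explicit IV taken from the request
 *	bytes 12..15 : block counter
 */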
1602
1603static void cc_set_ghash_desc(struct aead_request *req,
1604                              struct cc_hw_desc desc[], unsigned int *seq_size)
1605{
1606        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1607        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1608        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1609        unsigned int idx = *seq_size;
1610
1611        /* load key into AES */
1612        hw_desc_init(&desc[idx]);
1613        set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1614        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1615        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1616                     ctx->enc_keylen, NS_BIT);
1617        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1618        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1619        set_flow_mode(&desc[idx], S_DIN_to_AES);
1620        idx++;
1621
1622        /* process one zero block to generate hkey */
1623        hw_desc_init(&desc[idx]);
1624        set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1625        set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1626                      NS_BIT, 0);
1627        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1628        idx++;
1629
1630        /* Memory Barrier */
1631        hw_desc_init(&desc[idx]);
1632        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1633        set_dout_no_dma(&desc[idx], 0, 0, 1);
1634        idx++;
1635
1636        /* Load GHASH subkey */
1637        hw_desc_init(&desc[idx]);
1638        set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1639                     AES_BLOCK_SIZE, NS_BIT);
1640        set_dout_no_dma(&desc[idx], 0, 0, 1);
1641        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1642        set_aes_not_hash_mode(&desc[idx]);
1643        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1644        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1645        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1646        idx++;
1647
1648        /* Configure the hash engine to work with GHASH.
1649         * Since it was not possible to extend the HASH submodes to add
1650         * GHASH, the following command is necessary in order to select
1651         * GHASH (according to the HW designers).
1652         */
1653        hw_desc_init(&desc[idx]);
1654        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1655        set_dout_no_dma(&desc[idx], 0, 0, 1);
1656        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1657        set_aes_not_hash_mode(&desc[idx]);
1658        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1659        set_cipher_do(&desc[idx], 1); /* 1 = AES_SK RKEK */
1660        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1661        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1662        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1663        idx++;
1664
1665        /* Load the GHASH initial state (which is zero); as for any
1666         * hash, there is an initial state.
1667         */
1668        hw_desc_init(&desc[idx]);
1669        set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1670        set_dout_no_dma(&desc[idx], 0, 0, 1);
1671        set_flow_mode(&desc[idx], S_DIN_to_HASH);
1672        set_aes_not_hash_mode(&desc[idx]);
1673        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1674        set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1675        set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1676        idx++;
1677
1678        *seq_size = idx;
1679}
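
/*
 * Note (illustrative): the sequence above computes the GHASH subkey
 * H = AES-ECB_K(0^128) in hardware, then loads H and an all-zero
 * initial state into the hash engine, matching the GHASH definition
 * in NIST SP 800-38D.
 */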
1680
1681static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1682                             unsigned int *seq_size)
1683{
1684        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1685        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1686        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1687        unsigned int idx = *seq_size;
1688
1689        /* load key into AES */
1690        hw_desc_init(&desc[idx]);
1691        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1692        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1693        set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1694                     ctx->enc_keylen, NS_BIT);
1695        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1696        set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1697        set_flow_mode(&desc[idx], S_DIN_to_AES);
1698        idx++;
1699
1700        if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1701                /* load initial AES/CTR counter value, incremented by 2 */
1702                hw_desc_init(&desc[idx]);
1703                set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1704                set_key_size_aes(&desc[idx], ctx->enc_keylen);
1705                set_din_type(&desc[idx], DMA_DLLI,
1706                             req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1707                             NS_BIT);
1708                set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1709                set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1710                set_flow_mode(&desc[idx], S_DIN_to_AES);
1711                idx++;
1712        }
1713
1714        *seq_size = idx;
1715}
1716
1717static void cc_proc_gcm_result(struct aead_request *req,
1718                               struct cc_hw_desc desc[],
1719                               unsigned int *seq_size)
1720{
1721        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1722        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1723        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1724        dma_addr_t mac_result;
1725        unsigned int idx = *seq_size;
1726
1727        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1728                mac_result = req_ctx->mac_buf_dma_addr;
1729        } else { /* Encrypt */
1730                mac_result = req_ctx->icv_dma_addr;
1731        }
1732
1733        /* process(ghash) gcm_block_len */
1734        hw_desc_init(&desc[idx]);
1735        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1736                     AES_BLOCK_SIZE, NS_BIT);
1737        set_flow_mode(&desc[idx], DIN_HASH);
1738        idx++;
1739
1740        /* Store GHASH state after GHASH(assoc data + ciphertext + len block) */
1741        hw_desc_init(&desc[idx]);
1742        set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1743        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1744        set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1745                      NS_BIT, 0);
1746        set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1747        set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1748        set_aes_not_hash_mode(&desc[idx]);
1750        idx++;
1751
1752        /* load initial AES/CTR counter value, incremented by 1 */
1753        hw_desc_init(&desc[idx]);
1754        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1755        set_key_size_aes(&desc[idx], ctx->enc_keylen);
1756        set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1757                     AES_BLOCK_SIZE, NS_BIT);
1758        set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1759        set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1760        set_flow_mode(&desc[idx], S_DIN_to_AES);
1761        idx++;
1762
1763        /* Memory Barrier */
1764        hw_desc_init(&desc[idx]);
1765        set_din_no_dma(&desc[idx], 0, 0xfffff0);
1766        set_dout_no_dma(&desc[idx], 0, 0, 1);
1767        idx++;
1768
1769        /* process GCTR on the stored GHASH and store the MAC in mac_state */
1770        hw_desc_init(&desc[idx]);
1771        set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1772        set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1773                     AES_BLOCK_SIZE, NS_BIT);
1774        set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1775        set_queue_last_ind(ctx->drvdata, &desc[idx]);
1776        set_flow_mode(&desc[idx], DIN_AES_DOUT);
1777        idx++;
1778
1779        *seq_size = idx;
1780}
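
/*
 * Note (illustrative): this implements the GCM tag computation of
 * NIST SP 800-38D, T = MSB_t(GCTR_K(J0, S)), where S is the GHASH of
 * the associated data, ciphertext and 128-bit length block, and J0 is
 * the pre-increment counter block (gcm_iv_inc1 here).
 */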
1781
1782static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1783                  unsigned int *seq_size)
1784{
1785        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1786        unsigned int cipher_flow_mode;
1787
1788        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1789                cipher_flow_mode = AES_and_HASH;
1790        } else { /* Encrypt */
1791                cipher_flow_mode = AES_to_HASH_and_DOUT;
1792        }
1793
1794        /* In RFC 4543 there is no data to encrypt; just copy src to dst. */
1795        if (req_ctx->plaintext_authenticate_only) {
1796                cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1797                cc_set_ghash_desc(req, desc, seq_size);
1798                /* process(ghash) assoc data */
1799                cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1800                cc_set_gctr_desc(req, desc, seq_size);
1801                cc_proc_gcm_result(req, desc, seq_size);
1802                return 0;
1803        }
1804
1805        /* for GCM and RFC 4106 */
1806        cc_set_ghash_desc(req, desc, seq_size);
1807        /* process(ghash) assoc data */
1808        if (req->assoclen > 0)
1809                cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1810        cc_set_gctr_desc(req, desc, seq_size);
1811        /* process(gctr+ghash) */
1812        if (req_ctx->cryptlen)
1813                cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1814        cc_proc_gcm_result(req, desc, seq_size);
1815
1816        return 0;
1817}
1818
1819static int config_gcm_context(struct aead_request *req)
1820{
1821        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1822        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1823        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1824        struct device *dev = drvdata_to_dev(ctx->drvdata);
1825
1826        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1827                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1828                                req->cryptlen :
1829                                (req->cryptlen - ctx->authsize);
1830        __be32 counter = cpu_to_be32(2);
1831
1832        dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
1833                __func__, cryptlen, req->assoclen, ctx->authsize);
1834
1835        memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1836
1837        memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1838
1839        memcpy(req->iv + 12, &counter, 4);
1840        memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1841
1842        counter = cpu_to_be32(1);
1843        memcpy(req->iv + 12, &counter, 4);
1844        memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1845
1846        if (!req_ctx->plaintext_authenticate_only) {
1847                __be64 temp64;
1848
1849                temp64 = cpu_to_be64(req->assoclen * 8);
1850                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1851                temp64 = cpu_to_be64(cryptlen * 8);
1852                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1853        } else {
1854                /* RFC 4543: all data (AAD, IV, plaintext) is considered
1855                 * additional data, i.e. nothing is encrypted.
1856                 */
1857                __be64 temp64;
1858
1859                temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1860                                      cryptlen) * 8);
1861                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1862                temp64 = 0;
1863                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1864        }
1865
1866        return 0;
1867}
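
/*
 * Worked example (illustrative): for GCM with assoclen = 20 and
 * cryptlen = 64, the length block is the big-endian concatenation
 * len(A) || len(C) in bits, i.e. len_a = 0x00000000000000a0 and
 * len_c = 0x0000000000000200.
 */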
1868
1869static void cc_proc_rfc4_gcm(struct aead_request *req)
1870{
1871        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1872        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1873        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1874
1875        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1876               ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1877        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1878               GCM_BLOCK_RFC4_IV_SIZE);
1879        req->iv = areq_ctx->ctr_iv;
1880        req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1881}
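
/*
 * Resulting nonce layout (illustrative, per RFC 4106/4543): the
 * 12-byte GCM nonce is salt(4) || explicit_iv(8), with the salt taken
 * from the last four key bytes at setkey time; the 8 explicit-IV bytes
 * included in req->assoclen are subtracted back out.
 */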
1882
1883static int cc_proc_aead(struct aead_request *req,
1884                        enum drv_crypto_direction direct)
1885{
1886        int rc = 0;
1887        int seq_len = 0;
1888        struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1889        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1890        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1891        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1892        struct device *dev = drvdata_to_dev(ctx->drvdata);
1893        struct cc_crypto_req cc_req = {};
1894
1895        dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1896                ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1897                ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1898                sg_virt(req->dst), req->dst->offset, req->cryptlen);
1899
1900        /* STAT_PHASE_0: Init and sanity checks */
1901
1902        /* Check data length according to mode */
1903        if (validate_data_size(ctx, direct, req)) {
1904                dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1905                        req->cryptlen, req->assoclen);
1906                crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1907                return -EINVAL;
1908        }
1909
1910        /* Setup request structure */
1911        cc_req.user_cb = (void *)cc_aead_complete;
1912        cc_req.user_arg = (void *)req;
1913
1914        /* Setup request context */
1915        areq_ctx->gen_ctx.op_type = direct;
1916        areq_ctx->req_authsize = ctx->authsize;
1917        areq_ctx->cipher_mode = ctx->cipher_mode;
1918
1919        /* STAT_PHASE_1: Map buffers */
1920
1921        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1922                /* Build CTR IV - Copy nonce from last 4 bytes in
1923                 * CTR key to first 4 bytes in CTR IV
1924                 */
1925                memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1926                       CTR_RFC3686_NONCE_SIZE);
1927                if (!areq_ctx->backup_giv) /* user-provided (non-generated) IV */
1928                        memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1929                               req->iv, CTR_RFC3686_IV_SIZE);
1930                /* Initialize counter portion of counter block */
1931                *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1932                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1933
1934                /* Replace with counter iv */
1935                req->iv = areq_ctx->ctr_iv;
1936                areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1937        } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1938                   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1939                areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1940                if (areq_ctx->ctr_iv != req->iv) {
1941                        memcpy(areq_ctx->ctr_iv, req->iv,
1942                               crypto_aead_ivsize(tfm));
1943                        req->iv = areq_ctx->ctr_iv;
1944                }
1945        } else {
1946                areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1947        }
1948
1949        if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1950                rc = config_ccm_adata(req);
1951                if (rc) {
1952                        dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1953                                rc);
1954                        goto exit;
1955                }
1956        } else {
1957                areq_ctx->ccm_hdr_size = ccm_header_size_null;
1958        }
1959
1960        if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1961                rc = config_gcm_context(req);
1962                if (rc) {
1963                        dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1964                                rc);
1965                        goto exit;
1966                }
1967        }
1968
1969        rc = cc_map_aead_request(ctx->drvdata, req);
1970        if (rc) {
1971                dev_err(dev, "map_request() failed\n");
1972                goto exit;
1973        }
1974
1975        /* do we need to generate IV? */
1976        if (areq_ctx->backup_giv) {
1977                /* set the DMA mapped IV address*/
1978                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1979                        cc_req.ivgen_dma_addr[0] =
1980                                areq_ctx->gen_ctx.iv_dma_addr +
1981                                CTR_RFC3686_NONCE_SIZE;
1982                        cc_req.ivgen_dma_addr_len = 1;
1983                } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1984                        /* In CCM, the IV needs to exist both inside B0 and
1985                         * inside the counter. It is also copied to iv_dma_addr
1986                         * for other reasons (like returning it to the user).
1987                         * So, use 3 (identical) IV outputs.
1988                         */
1989                        cc_req.ivgen_dma_addr[0] =
1990                                areq_ctx->gen_ctx.iv_dma_addr +
1991                                CCM_BLOCK_IV_OFFSET;
1992                        cc_req.ivgen_dma_addr[1] =
1993                                sg_dma_address(&areq_ctx->ccm_adata_sg) +
1994                                CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
1995                        cc_req.ivgen_dma_addr[2] =
1996                                sg_dma_address(&areq_ctx->ccm_adata_sg) +
1997                                CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
1998                        cc_req.ivgen_dma_addr_len = 3;
1999                } else {
2000                        cc_req.ivgen_dma_addr[0] =
2001                                areq_ctx->gen_ctx.iv_dma_addr;
2002                        cc_req.ivgen_dma_addr_len = 1;
2003                }
2004
2005                /* set the IV size (8 or 16 bytes long) */
2006                cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2007        }
2008
2009        /* STAT_PHASE_2: Create sequence */
2010
2011        /* Load MLLI tables to SRAM if necessary */
2012        cc_mlli_to_sram(req, desc, &seq_len);
2013
2014        /* TODO: move seq_len by reference */
2015        switch (ctx->auth_mode) {
2016        case DRV_HASH_SHA1:
2017        case DRV_HASH_SHA256:
2018                cc_hmac_authenc(req, desc, &seq_len);
2019                break;
2020        case DRV_HASH_XCBC_MAC:
2021                cc_xcbc_authenc(req, desc, &seq_len);
2022                break;
2023        case DRV_HASH_NULL:
2024                if (ctx->cipher_mode == DRV_CIPHER_CCM)
2025                        cc_ccm(req, desc, &seq_len);
2026                if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2027                        cc_gcm(req, desc, &seq_len);
2028                break;
2029        default:
2030                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2031                cc_unmap_aead_request(dev, req);
2032                rc = -ENOTSUPP;
2033                goto exit;
2034        }
2035
2036        /* STAT_PHASE_3: Lock HW and push sequence */
2037
2038        rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2039
2040        if (rc != -EINPROGRESS && rc != -EBUSY) {
2041                dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2042                cc_unmap_aead_request(dev, req);
2043        }
2044
2045exit:
2046        return rc;
2047}
2048
2049static int cc_aead_encrypt(struct aead_request *req)
2050{
2051        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2052        int rc;
2053
2054        /* No generated IV required */
2055        areq_ctx->backup_iv = req->iv;
2056        areq_ctx->backup_giv = NULL;
2057        areq_ctx->is_gcm4543 = false;
2058
2059        areq_ctx->plaintext_authenticate_only = false;
2060
2061        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2062        if (rc != -EINPROGRESS && rc != -EBUSY)
2063                req->iv = areq_ctx->backup_iv;
2064
2065        return rc;
2066}
2067
2068static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2069{
2070        /* Very similar to cc_aead_encrypt() above. */
2071
2072        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2073        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2074        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2075        struct device *dev = drvdata_to_dev(ctx->drvdata);
2076        int rc = -EINVAL;
2077
2078        if (!valid_assoclen(req)) {
2079                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2080                goto out;
2081        }
2082
2083        /* No generated IV required */
2084        areq_ctx->backup_iv = req->iv;
2085        areq_ctx->backup_giv = NULL;
2086        areq_ctx->is_gcm4543 = true;
2087
2088        cc_proc_rfc4309_ccm(req);
2089
2090        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2091        if (rc != -EINPROGRESS && rc != -EBUSY)
2092                req->iv = areq_ctx->backup_iv;
2093out:
2094        return rc;
2095}
2096
2097static int cc_aead_decrypt(struct aead_request *req)
2098{
2099        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2100        int rc;
2101
2102        /* No generated IV required */
2103        areq_ctx->backup_iv = req->iv;
2104        areq_ctx->backup_giv = NULL;
2105        areq_ctx->is_gcm4543 = false;
2106
2107        areq_ctx->plaintext_authenticate_only = false;
2108
2109        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2110        if (rc != -EINPROGRESS && rc != -EBUSY)
2111                req->iv = areq_ctx->backup_iv;
2112
2113        return rc;
2114}
2115
2116static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2117{
2118        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2119        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2120        struct device *dev = drvdata_to_dev(ctx->drvdata);
2121        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2122        int rc = -EINVAL;
2123
2124        if (!valid_assoclen(req)) {
2125                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2126                goto out;
2127        }
2128
2129        /* No generated IV required */
2130        areq_ctx->backup_iv = req->iv;
2131        areq_ctx->backup_giv = NULL;
2132
2133        areq_ctx->is_gcm4543 = true;
2134        cc_proc_rfc4309_ccm(req);
2135
2136        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2137        if (rc != -EINPROGRESS && rc != -EBUSY)
2138                req->iv = areq_ctx->backup_iv;
2139
2140out:
2141        return rc;
2142}
2143
2144static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2145                                 unsigned int keylen)
2146{
2147        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2148        struct device *dev = drvdata_to_dev(ctx->drvdata);
2149
2150        dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2151
2152        if (keylen < 4)
2153                return -EINVAL;
2154
2155        keylen -= 4;
2156        memcpy(ctx->ctr_nonce, key + keylen, 4);
2157
2158        return cc_aead_setkey(tfm, key, keylen);
2159}
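
/*
 * Key layout sketch (per RFC 4106): the last four bytes of the key
 * material are the nonce salt and the rest is the AES key, e.g. a
 * 20-byte input yields the salt in key[16..19] while key[0..15] is
 * passed to cc_aead_setkey() as a 16-byte AES key.
 */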
2160
2161static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2162                                 unsigned int keylen)
2163{
2164        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2165        struct device *dev = drvdata_to_dev(ctx->drvdata);
2166
2167        dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2168
2169        if (keylen < 4)
2170                return -EINVAL;
2171
2172        keylen -= 4;
2173        memcpy(ctx->ctr_nonce, key + keylen, 4);
2174
2175        return cc_aead_setkey(tfm, key, keylen);
2176}
2177
2178static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2179                              unsigned int authsize)
2180{
2181        switch (authsize) {
2182        case 4:
2183        case 8:
2184        case 12:
2185        case 13:
2186        case 14:
2187        case 15:
2188        case 16:
2189                break;
2190        default:
2191                return -EINVAL;
2192        }
2193
2194        return cc_aead_setauthsize(authenc, authsize);
2195}
2196
2197static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2198                                      unsigned int authsize)
2199{
2200        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2201        struct device *dev = drvdata_to_dev(ctx->drvdata);
2202
2203        dev_dbg(dev, "authsize %d\n", authsize);
2204
2205        switch (authsize) {
2206        case 8:
2207        case 12:
2208        case 16:
2209                break;
2210        default:
2211                return -EINVAL;
2212        }
2213
2214        return cc_aead_setauthsize(authenc, authsize);
2215}
2216
2217static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2218                                      unsigned int authsize)
2219{
2220        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2221        struct device *dev = drvdata_to_dev(ctx->drvdata);
2222
2223        dev_dbg(dev, "authsize %d\n", authsize);
2224
2225        if (authsize != 16)
2226                return -EINVAL;
2227
2228        return cc_aead_setauthsize(authenc, authsize);
2229}
2230
2231static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2232{
2233        /* Very similar to cc_aead_encrypt() above. */
2234
2235        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2236        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2237        struct device *dev = drvdata_to_dev(ctx->drvdata);
2238        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2239        int rc = -EINVAL;
2240
2241        if (!valid_assoclen(req)) {
2242                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2243                goto out;
2244        }
2245
2246        /* No generated IV required */
2247        areq_ctx->backup_iv = req->iv;
2248        areq_ctx->backup_giv = NULL;
2249
2250        areq_ctx->plaintext_authenticate_only = false;
2251
2252        cc_proc_rfc4_gcm(req);
2253        areq_ctx->is_gcm4543 = true;
2254
2255        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2256        if (rc != -EINPROGRESS && rc != -EBUSY)
2257                req->iv = areq_ctx->backup_iv;
2258out:
2259        return rc;
2260}
2261
2262static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2263{
2264        /* Very similar to cc_aead_encrypt() above. */
2265
2266        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2267        int rc;
2268
2269        /* the plaintext is not encrypted with RFC 4543 */
2270        areq_ctx->plaintext_authenticate_only = true;
2271
2272        /* No generated IV required */
2273        areq_ctx->backup_iv = req->iv;
2274        areq_ctx->backup_giv = NULL;
2275
2276        cc_proc_rfc4_gcm(req);
2277        areq_ctx->is_gcm4543 = true;
2278
2279        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2280        if (rc != -EINPROGRESS && rc != -EBUSY)
2281                req->iv = areq_ctx->backup_iv;
2282
2283        return rc;
2284}
2285
2286static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2287{
2288        /* Very similar to cc_aead_decrypt() above. */
2289
2290        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2291        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2292        struct device *dev = drvdata_to_dev(ctx->drvdata);
2293        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2294        int rc = -EINVAL;
2295
2296        if (!valid_assoclen(req)) {
2297                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2298                goto out;
2299        }
2300
2301        /* No generated IV required */
2302        areq_ctx->backup_iv = req->iv;
2303        areq_ctx->backup_giv = NULL;
2304
2305        areq_ctx->plaintext_authenticate_only = false;
2306
2307        cc_proc_rfc4_gcm(req);
2308        areq_ctx->is_gcm4543 = true;
2309
2310        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2311        if (rc != -EINPROGRESS && rc != -EBUSY)
2312                req->iv = areq_ctx->backup_iv;
2313out:
2314        return rc;
2315}
2316
2317static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2318{
2319        /* Very similar to cc_aead_decrypt() above. */
2320
2321        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2322        int rc;
2323
2324        /* the payload is not decrypted with RFC 4543 */
2325        areq_ctx->plaintext_authenticate_only = true;
2326
2327        /* No generated IV required */
2328        areq_ctx->backup_iv = req->iv;
2329        areq_ctx->backup_giv = NULL;
2330
2331        cc_proc_rfc4_gcm(req);
2332        areq_ctx->is_gcm4543 = true;
2333
2334        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2335        if (rc != -EINPROGRESS && rc != -EBUSY)
2336                req->iv = areq_ctx->backup_iv;
2337
2338        return rc;
2339}
2340
2341/* aead alg */
2342static struct cc_alg_template aead_algs[] = {
2343        {
2344                .name = "authenc(hmac(sha1),cbc(aes))",
2345                .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2346                .blocksize = AES_BLOCK_SIZE,
2347                .type = CRYPTO_ALG_TYPE_AEAD,
2348                .template_aead = {
2349                        .setkey = cc_aead_setkey,
2350                        .setauthsize = cc_aead_setauthsize,
2351                        .encrypt = cc_aead_encrypt,
2352                        .decrypt = cc_aead_decrypt,
2353                        .init = cc_aead_init,
2354                        .exit = cc_aead_exit,
2355                        .ivsize = AES_BLOCK_SIZE,
2356                        .maxauthsize = SHA1_DIGEST_SIZE,
2357                },
2358                .cipher_mode = DRV_CIPHER_CBC,
2359                .flow_mode = S_DIN_to_AES,
2360                .auth_mode = DRV_HASH_SHA1,
2361                .min_hw_rev = CC_HW_REV_630,
2362        },
2363        {
2364                .name = "authenc(hmac(sha1),cbc(des3_ede))",
2365                .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2366                .blocksize = DES3_EDE_BLOCK_SIZE,
2367                .type = CRYPTO_ALG_TYPE_AEAD,
2368                .template_aead = {
2369                        .setkey = cc_aead_setkey,
2370                        .setauthsize = cc_aead_setauthsize,
2371                        .encrypt = cc_aead_encrypt,
2372                        .decrypt = cc_aead_decrypt,
2373                        .init = cc_aead_init,
2374                        .exit = cc_aead_exit,
2375                        .ivsize = DES3_EDE_BLOCK_SIZE,
2376                        .maxauthsize = SHA1_DIGEST_SIZE,
2377                },
2378                .cipher_mode = DRV_CIPHER_CBC,
2379                .flow_mode = S_DIN_to_DES,
2380                .auth_mode = DRV_HASH_SHA1,
2381                .min_hw_rev = CC_HW_REV_630,
2382        },
2383        {
2384                .name = "authenc(hmac(sha256),cbc(aes))",
2385                .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2386                .blocksize = AES_BLOCK_SIZE,
2387                .type = CRYPTO_ALG_TYPE_AEAD,
2388                .template_aead = {
2389                        .setkey = cc_aead_setkey,
2390                        .setauthsize = cc_aead_setauthsize,
2391                        .encrypt = cc_aead_encrypt,
2392                        .decrypt = cc_aead_decrypt,
2393                        .init = cc_aead_init,
2394                        .exit = cc_aead_exit,
2395                        .ivsize = AES_BLOCK_SIZE,
2396                        .maxauthsize = SHA256_DIGEST_SIZE,
2397                },
2398                .cipher_mode = DRV_CIPHER_CBC,
2399                .flow_mode = S_DIN_to_AES,
2400                .auth_mode = DRV_HASH_SHA256,
2401                .min_hw_rev = CC_HW_REV_630,
2402        },
2403        {
2404                .name = "authenc(hmac(sha256),cbc(des3_ede))",
2405                .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2406                .blocksize = DES3_EDE_BLOCK_SIZE,
2407                .type = CRYPTO_ALG_TYPE_AEAD,
2408                .template_aead = {
2409                        .setkey = cc_aead_setkey,
2410                        .setauthsize = cc_aead_setauthsize,
2411                        .encrypt = cc_aead_encrypt,
2412                        .decrypt = cc_aead_decrypt,
2413                        .init = cc_aead_init,
2414                        .exit = cc_aead_exit,
2415                        .ivsize = DES3_EDE_BLOCK_SIZE,
2416                        .maxauthsize = SHA256_DIGEST_SIZE,
2417                },
2418                .cipher_mode = DRV_CIPHER_CBC,
2419                .flow_mode = S_DIN_to_DES,
2420                .auth_mode = DRV_HASH_SHA256,
2421                .min_hw_rev = CC_HW_REV_630,
2422        },
2423        {
2424                .name = "authenc(xcbc(aes),cbc(aes))",
2425                .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2426                .blocksize = AES_BLOCK_SIZE,
2427                .type = CRYPTO_ALG_TYPE_AEAD,
2428                .template_aead = {
2429                        .setkey = cc_aead_setkey,
2430                        .setauthsize = cc_aead_setauthsize,
2431                        .encrypt = cc_aead_encrypt,
2432                        .decrypt = cc_aead_decrypt,
2433                        .init = cc_aead_init,
2434                        .exit = cc_aead_exit,
2435                        .ivsize = AES_BLOCK_SIZE,
2436                        .maxauthsize = AES_BLOCK_SIZE,
2437                },
2438                .cipher_mode = DRV_CIPHER_CBC,
2439                .flow_mode = S_DIN_to_AES,
2440                .auth_mode = DRV_HASH_XCBC_MAC,
2441                .min_hw_rev = CC_HW_REV_630,
2442        },
2443        {
2444                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2445                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2446                .blocksize = 1,
2447                .type = CRYPTO_ALG_TYPE_AEAD,
2448                .template_aead = {
2449                        .setkey = cc_aead_setkey,
2450                        .setauthsize = cc_aead_setauthsize,
2451                        .encrypt = cc_aead_encrypt,
2452                        .decrypt = cc_aead_decrypt,
2453                        .init = cc_aead_init,
2454                        .exit = cc_aead_exit,
2455                        .ivsize = CTR_RFC3686_IV_SIZE,
2456                        .maxauthsize = SHA1_DIGEST_SIZE,
2457                },
2458                .cipher_mode = DRV_CIPHER_CTR,
2459                .flow_mode = S_DIN_to_AES,
2460                .auth_mode = DRV_HASH_SHA1,
2461                .min_hw_rev = CC_HW_REV_630,
2462        },
2463        {
2464                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2465                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2466                .blocksize = 1,
2467                .type = CRYPTO_ALG_TYPE_AEAD,
2468                .template_aead = {
2469                        .setkey = cc_aead_setkey,
2470                        .setauthsize = cc_aead_setauthsize,
2471                        .encrypt = cc_aead_encrypt,
2472                        .decrypt = cc_aead_decrypt,
2473                        .init = cc_aead_init,
2474                        .exit = cc_aead_exit,
2475                        .ivsize = CTR_RFC3686_IV_SIZE,
2476                        .maxauthsize = SHA256_DIGEST_SIZE,
2477                },
2478                .cipher_mode = DRV_CIPHER_CTR,
2479                .flow_mode = S_DIN_to_AES,
2480                .auth_mode = DRV_HASH_SHA256,
2481                .min_hw_rev = CC_HW_REV_630,
2482        },
2483        {
2484                .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2485                .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2486                .blocksize = 1,
2487                .type = CRYPTO_ALG_TYPE_AEAD,
2488                .template_aead = {
2489                        .setkey = cc_aead_setkey,
2490                        .setauthsize = cc_aead_setauthsize,
2491                        .encrypt = cc_aead_encrypt,
2492                        .decrypt = cc_aead_decrypt,
2493                        .init = cc_aead_init,
2494                        .exit = cc_aead_exit,
2495                        .ivsize = CTR_RFC3686_IV_SIZE,
2496                        .maxauthsize = AES_BLOCK_SIZE,
2497                },
2498                .cipher_mode = DRV_CIPHER_CTR,
2499                .flow_mode = S_DIN_to_AES,
2500                .auth_mode = DRV_HASH_XCBC_MAC,
2501                .min_hw_rev = CC_HW_REV_630,
2502        },
2503        {
2504                .name = "ccm(aes)",
2505                .driver_name = "ccm-aes-ccree",
2506                .blocksize = 1,
2507                .type = CRYPTO_ALG_TYPE_AEAD,
2508                .template_aead = {
2509                        .setkey = cc_aead_setkey,
2510                        .setauthsize = cc_ccm_setauthsize,
2511                        .encrypt = cc_aead_encrypt,
2512                        .decrypt = cc_aead_decrypt,
2513                        .init = cc_aead_init,
2514                        .exit = cc_aead_exit,
2515                        .ivsize = AES_BLOCK_SIZE,
2516                        .maxauthsize = AES_BLOCK_SIZE,
2517                },
2518                .cipher_mode = DRV_CIPHER_CCM,
2519                .flow_mode = S_DIN_to_AES,
2520                .auth_mode = DRV_HASH_NULL,
2521                .min_hw_rev = CC_HW_REV_630,
2522        },
2523        {
2524                .name = "rfc4309(ccm(aes))",
2525                .driver_name = "rfc4309-ccm-aes-ccree",
2526                .blocksize = 1,
2527                .type = CRYPTO_ALG_TYPE_AEAD,
2528                .template_aead = {
2529                        .setkey = cc_rfc4309_ccm_setkey,
2530                        .setauthsize = cc_rfc4309_ccm_setauthsize,
2531                        .encrypt = cc_rfc4309_ccm_encrypt,
2532                        .decrypt = cc_rfc4309_ccm_decrypt,
2533                        .init = cc_aead_init,
2534                        .exit = cc_aead_exit,
2535                        .ivsize = CCM_BLOCK_IV_SIZE,
2536                        .maxauthsize = AES_BLOCK_SIZE,
2537                },
2538                .cipher_mode = DRV_CIPHER_CCM,
2539                .flow_mode = S_DIN_to_AES,
2540                .auth_mode = DRV_HASH_NULL,
2541                .min_hw_rev = CC_HW_REV_630,
2542        },
2543        {
2544                .name = "gcm(aes)",
2545                .driver_name = "gcm-aes-ccree",
2546                .blocksize = 1,
2547                .type = CRYPTO_ALG_TYPE_AEAD,
2548                .template_aead = {
2549                        .setkey = cc_aead_setkey,
2550                        .setauthsize = cc_gcm_setauthsize,
2551                        .encrypt = cc_aead_encrypt,
2552                        .decrypt = cc_aead_decrypt,
2553                        .init = cc_aead_init,
2554                        .exit = cc_aead_exit,
2555                        .ivsize = 12,
2556                        .maxauthsize = AES_BLOCK_SIZE,
2557                },
2558                .cipher_mode = DRV_CIPHER_GCTR,
2559                .flow_mode = S_DIN_to_AES,
2560                .auth_mode = DRV_HASH_NULL,
2561                .min_hw_rev = CC_HW_REV_630,
2562        },
2563        {
2564                .name = "rfc4106(gcm(aes))",
2565                .driver_name = "rfc4106-gcm-aes-ccree",
2566                .blocksize = 1,
2567                .type = CRYPTO_ALG_TYPE_AEAD,
2568                .template_aead = {
2569                        .setkey = cc_rfc4106_gcm_setkey,
2570                        .setauthsize = cc_rfc4106_gcm_setauthsize,
2571                        .encrypt = cc_rfc4106_gcm_encrypt,
2572                        .decrypt = cc_rfc4106_gcm_decrypt,
2573                        .init = cc_aead_init,
2574                        .exit = cc_aead_exit,
2575                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2576                        .maxauthsize = AES_BLOCK_SIZE,
2577                },
2578                .cipher_mode = DRV_CIPHER_GCTR,
2579                .flow_mode = S_DIN_to_AES,
2580                .auth_mode = DRV_HASH_NULL,
2581                .min_hw_rev = CC_HW_REV_630,
2582        },
2583        {
2584                .name = "rfc4543(gcm(aes))",
2585                .driver_name = "rfc4543-gcm-aes-ccree",
2586                .blocksize = 1,
2587                .type = CRYPTO_ALG_TYPE_AEAD,
2588                .template_aead = {
2589                        .setkey = cc_rfc4543_gcm_setkey,
2590                        .setauthsize = cc_rfc4543_gcm_setauthsize,
2591                        .encrypt = cc_rfc4543_gcm_encrypt,
2592                        .decrypt = cc_rfc4543_gcm_decrypt,
2593                        .init = cc_aead_init,
2594                        .exit = cc_aead_exit,
2595                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2596                        .maxauthsize = AES_BLOCK_SIZE,
2597                },
2598                .cipher_mode = DRV_CIPHER_GCTR,
2599                .flow_mode = S_DIN_to_AES,
2600                .auth_mode = DRV_HASH_NULL,
2601                .min_hw_rev = CC_HW_REV_630,
2602        },
2603};
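
/*
 * Usage sketch (illustrative): once registered, these transforms are
 * reached through the generic kernel AEAD API rather than called
 * directly, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setkey(tfm, key, 16);   // key: caller's buffer
 *		crypto_aead_setauthsize(tfm, 16);
 *		// ...build an aead_request, call crypto_aead_encrypt()...
 *		crypto_free_aead(tfm);
 *	}
 *
 * The "-ccree" implementations above are picked automatically when
 * they are the highest-priority providers of the requested algorithm.
 */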
2604
2605static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2606                                                struct device *dev)
2607{
2608        struct cc_crypto_alg *t_alg;
2609        struct aead_alg *alg;
2610
2611        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2612        if (!t_alg)
2613                return ERR_PTR(-ENOMEM);
2614
2615        alg = &tmpl->template_aead;
2616
2617        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2618        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2619                 tmpl->driver_name);
2620        alg->base.cra_module = THIS_MODULE;
2621        alg->base.cra_priority = CC_CRA_PRIO;
2622
2623        alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2624        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2625                         tmpl->type;
2626        alg->init = cc_aead_init;
2627        alg->exit = cc_aead_exit;
2628
2629        t_alg->aead_alg = *alg;
2630
2631        t_alg->cipher_mode = tmpl->cipher_mode;
2632        t_alg->flow_mode = tmpl->flow_mode;
2633        t_alg->auth_mode = tmpl->auth_mode;
2634
2635        return t_alg;
2636}
2637
2638int cc_aead_free(struct cc_drvdata *drvdata)
2639{
2640        struct cc_crypto_alg *t_alg, *n;
2641        struct cc_aead_handle *aead_handle =
2642                (struct cc_aead_handle *)drvdata->aead_handle;
2643
2644        if (aead_handle) {
2645                /* Remove registered algs */
2646                list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2647                                         entry) {
2648                        crypto_unregister_aead(&t_alg->aead_alg);
2649                        list_del(&t_alg->entry);
2650                        kfree(t_alg);
2651                }
2652                kfree(aead_handle);
2653                drvdata->aead_handle = NULL;
2654        }
2655
2656        return 0;
2657}
2658
2659int cc_aead_alloc(struct cc_drvdata *drvdata)
2660{
2661        struct cc_aead_handle *aead_handle;
2662        struct cc_crypto_alg *t_alg;
2663        int rc = -ENOMEM;
2664        int alg;
2665        struct device *dev = drvdata_to_dev(drvdata);
2666
2667        aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2668        if (!aead_handle) {
2669                rc = -ENOMEM;
2670                goto fail0;
2671        }
2672
2673        INIT_LIST_HEAD(&aead_handle->aead_list);
2674        drvdata->aead_handle = aead_handle;
2675
2676        aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2677                                                         MAX_HMAC_DIGEST_SIZE);
2678
2679        if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2680                dev_err(dev, "SRAM pool exhausted\n");
2681                rc = -ENOMEM;
2682                goto fail1;
2683        }
2684
2685        /* Linux crypto */
2686        for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2687                if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
2688                        continue;
2689
2690                t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2691                if (IS_ERR(t_alg)) {
2692                        rc = PTR_ERR(t_alg);
2693                        dev_err(dev, "%s alg allocation failed\n",
2694                                aead_algs[alg].driver_name);
2695                        goto fail1;
2696                }
2697                t_alg->drvdata = drvdata;
2698                rc = crypto_register_aead(&t_alg->aead_alg);
2699                if (rc) {
2700                        dev_err(dev, "%s alg registration failed\n",
2701                                t_alg->aead_alg.base.cra_driver_name);
2702                        goto fail2;
2703                } else {
2704                        list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2705                        dev_dbg(dev, "Registered %s\n",
2706                                t_alg->aead_alg.base.cra_driver_name);
2707                }
2708        }
2709
2710        return 0;
2711
2712fail2:
2713        kfree(t_alg);
2714fail1:
2715        cc_aead_free(drvdata);
2716fail0:
2717        return rc;
2718}
2719