linux/drivers/crypto/chelsio/chcr_algo.c
   1/*
   2 * This file is part of the Chelsio T6 Crypto driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 *
  34 * Written and Maintained by:
  35 *      Manoj Malviya (manojmalviya@chelsio.com)
  36 *      Atul Gupta (atul.gupta@chelsio.com)
  37 *      Jitendra Lulla (jlulla@chelsio.com)
  38 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39 *      Harsh Jain (harsh@chelsio.com)
  40 */
  41
  42#define pr_fmt(fmt) "chcr:" fmt
  43
  44#include <linux/kernel.h>
  45#include <linux/module.h>
  46#include <linux/crypto.h>
  47#include <linux/cryptohash.h>
  48#include <linux/skbuff.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/highmem.h>
  51#include <linux/scatterlist.h>
  52
  53#include <crypto/aes.h>
  54#include <crypto/algapi.h>
  55#include <crypto/hash.h>
  56#include <crypto/gcm.h>
  57#include <crypto/sha.h>
  58#include <crypto/authenc.h>
  59#include <crypto/ctr.h>
  60#include <crypto/gf128mul.h>
  61#include <crypto/internal/aead.h>
  62#include <crypto/null.h>
  63#include <crypto/internal/skcipher.h>
  64#include <crypto/aead.h>
  65#include <crypto/scatterwalk.h>
  66#include <crypto/internal/hash.h>
  67
  68#include "t4fw_api.h"
  69#include "t4_msg.h"
  70#include "chcr_core.h"
  71#include "chcr_algo.h"
  72#include "chcr_crypto.h"
  73
  74#define IV AES_BLOCK_SIZE
  75
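     /*
      * Work-request space (in bytes) consumed by a source ULPTX SGL and a
      * destination PHYS_DSGL with the given number of entries, indexed by
      * entry count.  Used below when deciding how much payload fits into a
      * single work request.
      */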
  76static unsigned int sgl_ent_len[] = {
  77        0, 0, 16, 24, 40, 48, 64, 72, 88,
  78        96, 112, 120, 136, 144, 160, 168, 184,
  79        192, 208, 216, 232, 240, 256, 264, 280,
  80        288, 304, 312, 328, 336, 352, 360, 376
  81};
  82
  83static unsigned int dsgl_ent_len[] = {
  84        0, 32, 32, 48, 48, 64, 64, 80, 80,
  85        112, 112, 128, 128, 144, 144, 160, 160,
  86        192, 192, 208, 208, 224, 224, 240, 240,
  87        272, 272, 288, 288, 304, 304, 320, 320
  88};
  89
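     /*
      * AES key-schedule round constants (Rcon), with the constant in the
      * most significant byte; consumed by get_aes_decrypt_key() below.
      */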
  90static u32 round_constant[11] = {
  91        0x01000000, 0x02000000, 0x04000000, 0x08000000,
  92        0x10000000, 0x20000000, 0x40000000, 0x80000000,
  93        0x1B000000, 0x36000000, 0x6C000000
  94};
  95
  96static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  97                                   unsigned char *input, int err);
  98
  99static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
 100{
 101        return ctx->crypto_ctx->aeadctx;
 102}
 103
 104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 105{
 106        return ctx->crypto_ctx->ablkctx;
 107}
 108
 109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 110{
 111        return ctx->crypto_ctx->hmacctx;
 112}
 113
 114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
 115{
 116        return gctx->ctx->gcm;
 117}
 118
 119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 120{
 121        return gctx->ctx->authenc;
 122}
 123
 124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 125{
 126        return ctx->dev->u_ctx;
 127}
 128
 129static inline int is_ofld_imm(const struct sk_buff *skb)
 130{
 131        return (skb->len <= SGE_MAX_WR_LEN);
 132}
 133
 134static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 135{
 136        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
 137}
 138
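     /*
      * Count the SG entries needed to cover @reqlen bytes of @sg, skipping
      * the first @skip bytes and splitting each DMA segment into chunks of
      * at most @entlen bytes.
      */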
 139static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 140                         unsigned int entlen,
 141                         unsigned int skip)
 142{
 143        int nents = 0;
 144        unsigned int less;
 145        unsigned int skip_len = 0;
 146
 147        while (sg && skip) {
 148                if (sg_dma_len(sg) <= skip) {
 149                        skip -= sg_dma_len(sg);
 150                        skip_len = 0;
 151                        sg = sg_next(sg);
 152                } else {
 153                        skip_len = skip;
 154                        skip = 0;
 155                }
 156        }
 157
 158        while (sg && reqlen) {
 159                less = min(reqlen, sg_dma_len(sg) - skip_len);
 160                nents += DIV_ROUND_UP(less, entlen);
 161                reqlen -= less;
 162                skip_len = 0;
 163                sg = sg_next(sg);
 164        }
 165        return nents;
 166}
 167
 168static inline int get_aead_subtype(struct crypto_aead *aead)
 169{
 170        struct aead_alg *alg = crypto_aead_alg(aead);
 171        struct chcr_alg_template *chcr_crypto_alg =
 172                container_of(alg, struct chcr_alg_template, alg.aead);
 173        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 174}
 175
 176void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 177{
 178        u8 temp[SHA512_DIGEST_SIZE];
 179        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 180        int authsize = crypto_aead_authsize(tfm);
 181        struct cpl_fw6_pld *fw6_pld;
 182        int cmp = 0;
 183
 184        fw6_pld = (struct cpl_fw6_pld *)input;
 185        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 186            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
 187                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 188        } else {
 189
 190                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 191                                authsize, req->assoclen +
 192                                req->cryptlen - authsize);
 193                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 194        }
 195        if (cmp)
 196                *err = -EBADMSG;
 197        else
 198                *err = 0;
 199}
 200
 201static inline void chcr_handle_aead_resp(struct aead_request *req,
 202                                         unsigned char *input,
 203                                         int err)
 204{
 205        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 206        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 207        struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
 208
 209        chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
 210        if (reqctx->b0_dma)
 211                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
 212                                 reqctx->b0_len, DMA_BIDIRECTIONAL);
 213        if (reqctx->verify == VERIFY_SW) {
 214                chcr_verify_tag(req, input, &err);
 215                reqctx->verify = VERIFY_HW;
 216        }
 217        req->base.complete(&req->base, err);
 218}
 219
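     /*
      * Expand the AES encryption key and write out the last Nk round-key
      * words, most recent first and in big-endian order.  This is the
      * "reverse round key" (rrkey) the hardware uses for decryption.
      */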
 220static void get_aes_decrypt_key(unsigned char *dec_key,
 221                                       const unsigned char *key,
 222                                       unsigned int keylength)
 223{
 224        u32 temp;
 225        u32 w_ring[MAX_NK];
 226        int i, j, k;
 227        u8  nr, nk;
 228
 229        switch (keylength) {
 230        case AES_KEYLENGTH_128BIT:
 231                nk = KEYLENGTH_4BYTES;
 232                nr = NUMBER_OF_ROUNDS_10;
 233                break;
 234        case AES_KEYLENGTH_192BIT:
 235                nk = KEYLENGTH_6BYTES;
 236                nr = NUMBER_OF_ROUNDS_12;
 237                break;
 238        case AES_KEYLENGTH_256BIT:
 239                nk = KEYLENGTH_8BYTES;
 240                nr = NUMBER_OF_ROUNDS_14;
 241                break;
 242        default:
 243                return;
 244        }
 245        for (i = 0; i < nk; i++)
 246                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
 247
 248        i = 0;
 249        temp = w_ring[nk - 1];
 250        while (i + nk < (nr + 1) * 4) {
 251                if (!(i % nk)) {
 252                        /* RotWord(temp) */
 253                        temp = (temp << 8) | (temp >> 24);
 254                        temp = aes_ks_subword(temp);
 255                        temp ^= round_constant[i / nk];
 256                } else if (nk == 8 && (i % 4 == 0)) {
 257                        temp = aes_ks_subword(temp);
 258                }
 259                w_ring[i % nk] ^= temp;
 260                temp = w_ring[i % nk];
 261                i++;
 262        }
 263        i--;
 264        for (k = 0, j = i % nk; k < nk; k++) {
 265                *((u32 *)dec_key + k) = htonl(w_ring[j]);
 266                j--;
 267                if (j < 0)
 268                        j += nk;
 269        }
 270}
 271
 272static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 273{
 274        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
 275
 276        switch (ds) {
 277        case SHA1_DIGEST_SIZE:
 278                base_hash = crypto_alloc_shash("sha1", 0, 0);
 279                break;
 280        case SHA224_DIGEST_SIZE:
 281                base_hash = crypto_alloc_shash("sha224", 0, 0);
 282                break;
 283        case SHA256_DIGEST_SIZE:
 284                base_hash = crypto_alloc_shash("sha256", 0, 0);
 285                break;
 286        case SHA384_DIGEST_SIZE:
 287                base_hash = crypto_alloc_shash("sha384", 0, 0);
 288                break;
 289        case SHA512_DIGEST_SIZE:
 290                base_hash = crypto_alloc_shash("sha512", 0, 0);
 291                break;
 292        }
 293
 294        return base_hash;
 295}
 296
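     /*
      * Hash one block-sized ipad/opad buffer and export the intermediate
      * digest state; used to precompute the inner and outer partial hashes
      * for HMAC.
      */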
 297static int chcr_compute_partial_hash(struct shash_desc *desc,
 298                                     char *iopad, char *result_hash,
 299                                     int digest_size)
 300{
 301        struct sha1_state sha1_st;
 302        struct sha256_state sha256_st;
 303        struct sha512_state sha512_st;
 304        int error;
 305
 306        if (digest_size == SHA1_DIGEST_SIZE) {
 307                error = crypto_shash_init(desc) ?:
 308                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
 309                        crypto_shash_export(desc, (void *)&sha1_st);
 310                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
 311        } else if (digest_size == SHA224_DIGEST_SIZE) {
 312                error = crypto_shash_init(desc) ?:
 313                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 314                        crypto_shash_export(desc, (void *)&sha256_st);
 315                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 316
 317        } else if (digest_size == SHA256_DIGEST_SIZE) {
 318                error = crypto_shash_init(desc) ?:
 319                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
 320                        crypto_shash_export(desc, (void *)&sha256_st);
 321                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
 322
 323        } else if (digest_size == SHA384_DIGEST_SIZE) {
 324                error = crypto_shash_init(desc) ?:
 325                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 326                        crypto_shash_export(desc, (void *)&sha512_st);
 327                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 328
 329        } else if (digest_size == SHA512_DIGEST_SIZE) {
 330                error = crypto_shash_init(desc) ?:
 331                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
 332                        crypto_shash_export(desc, (void *)&sha512_st);
 333                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
 334        } else {
 335                error = -EINVAL;
 336                pr_err("Unknown digest size %d\n", digest_size);
 337        }
 338        return error;
 339}
 340
 341static void chcr_change_order(char *buf, int ds)
 342{
 343        int i;
 344
 345        if (ds == SHA512_DIGEST_SIZE) {
 346                for (i = 0; i < (ds / sizeof(u64)); i++)
 347                        *((__be64 *)buf + i) =
 348                                cpu_to_be64(*((u64 *)buf + i));
 349        } else {
 350                for (i = 0; i < (ds / sizeof(u32)); i++)
 351                        *((__be32 *)buf + i) =
 352                                cpu_to_be32(*((u32 *)buf + i));
 353        }
 354}
 355
 356static inline int is_hmac(struct crypto_tfm *tfm)
 357{
 358        struct crypto_alg *alg = tfm->__crt_alg;
 359        struct chcr_alg_template *chcr_crypto_alg =
 360                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 361                             alg.hash);
 362        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 363                return 1;
 364        return 0;
 365}
 366
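     /*
      * Helpers for building the CPL_RX_PHYS_DSGL destination list: the
      * walker appends address/length pairs (eight per phys_sge_pairs block)
      * and dsgl_walk_end() fills in the CPL header with the final entry
      * count and the response queue id.
      */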
 367static inline void dsgl_walk_init(struct dsgl_walk *walk,
 368                                   struct cpl_rx_phys_dsgl *dsgl)
 369{
 370        walk->dsgl = dsgl;
 371        walk->nents = 0;
 372        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 373}
 374
 375static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 376{
 377        struct cpl_rx_phys_dsgl *phys_cpl;
 378
 379        phys_cpl = walk->dsgl;
 380
 381        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 382                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
 383        phys_cpl->pcirlxorder_to_noofsgentr =
 384                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
 385                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
 386                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
 387                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
 388                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
 389                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
 390        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 391        phys_cpl->rss_hdr_int.qid = htons(qid);
 392        phys_cpl->rss_hdr_int.hash_val = 0;
 393}
 394
 395static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
 396                                        size_t size,
 397                                        dma_addr_t *addr)
 398{
 399        int j;
 400
 401        if (!size)
 402                return;
 403        j = walk->nents;
 404        walk->to->len[j % 8] = htons(size);
 405        walk->to->addr[j % 8] = cpu_to_be64(*addr);
 406        j++;
 407        if ((j % 8) == 0)
 408                walk->to++;
 409        walk->nents = j;
 410}
 411
 412static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
 413                           struct scatterlist *sg,
 414                              unsigned int slen,
 415                              unsigned int skip)
 416{
 417        int skip_len = 0;
 418        unsigned int left_size = slen, len = 0;
 419        unsigned int j = walk->nents;
 420        int offset, ent_len;
 421
 422        if (!slen)
 423                return;
 424        while (sg && skip) {
 425                if (sg_dma_len(sg) <= skip) {
 426                        skip -= sg_dma_len(sg);
 427                        skip_len = 0;
 428                        sg = sg_next(sg);
 429                } else {
 430                        skip_len = skip;
 431                        skip = 0;
 432                }
 433        }
 434
 435        while (left_size && sg) {
 436                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 437                offset = 0;
 438                while (len) {
 439                        ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
 440                        walk->to->len[j % 8] = htons(ent_len);
 441                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
 442                                                      offset + skip_len);
 443                        offset += ent_len;
 444                        len -= ent_len;
 445                        j++;
 446                        if ((j % 8) == 0)
 447                                walk->to++;
 448                }
 449                walk->last_sg = sg;
 450                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
 451                                          skip_len) + skip_len;
 452                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
 453                skip_len = 0;
 454                sg = sg_next(sg);
 455        }
 456        walk->nents = j;
 457}
 458
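     /*
      * Helpers for building the ULPTX source SGL: the first segment goes in
      * len0/addr0 of the ulptx_sgl header, later segments are packed two per
      * sge pair, and ulptx_walk_end() writes the command word with the final
      * entry count.
      */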
 459static inline void ulptx_walk_init(struct ulptx_walk *walk,
 460                                   struct ulptx_sgl *ulp)
 461{
 462        walk->sgl = ulp;
 463        walk->nents = 0;
 464        walk->pair_idx = 0;
 465        walk->pair = ulp->sge;
 466        walk->last_sg = NULL;
 467        walk->last_sg_len = 0;
 468}
 469
 470static inline void ulptx_walk_end(struct ulptx_walk *walk)
 471{
 472        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
 473                              ULPTX_NSGE_V(walk->nents));
 474}
 475
 476
 477static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
 478                                        size_t size,
 479                                        dma_addr_t *addr)
 480{
 481        if (!size)
 482                return;
 483
 484        if (walk->nents == 0) {
 485                walk->sgl->len0 = cpu_to_be32(size);
 486                walk->sgl->addr0 = cpu_to_be64(*addr);
 487        } else {
 488                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
 489                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
 490                walk->pair_idx = !walk->pair_idx;
 491                if (!walk->pair_idx)
 492                        walk->pair++;
 493        }
 494        walk->nents++;
 495}
 496
 497static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
 498                                        struct scatterlist *sg,
 499                               unsigned int len,
 500                               unsigned int skip)
 501{
 502        int small;
 503        int skip_len = 0;
 504        unsigned int sgmin;
 505
 506        if (!len)
 507                return;
 508        while (sg && skip) {
 509                if (sg_dma_len(sg) <= skip) {
 510                        skip -= sg_dma_len(sg);
 511                        skip_len = 0;
 512                        sg = sg_next(sg);
 513                } else {
 514                        skip_len = skip;
 515                        skip = 0;
 516                }
 517        }
 518        WARN(!sg, "SG should not be null here\n");
 519        if (sg && (walk->nents == 0)) {
 520                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
 521                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 522                walk->sgl->len0 = cpu_to_be32(sgmin);
 523                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
 524                walk->nents++;
 525                len -= sgmin;
 526                walk->last_sg = sg;
 527                walk->last_sg_len = sgmin + skip_len;
 528                skip_len += sgmin;
 529                if (sg_dma_len(sg) == skip_len) {
 530                        sg = sg_next(sg);
 531                        skip_len = 0;
 532                }
 533        }
 534
 535        while (sg && len) {
 536                small = min(sg_dma_len(sg) - skip_len, len);
 537                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
 538                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
 539                walk->pair->addr[walk->pair_idx] =
 540                        cpu_to_be64(sg_dma_address(sg) + skip_len);
 541                walk->pair_idx = !walk->pair_idx;
 542                walk->nents++;
 543                if (!walk->pair_idx)
 544                        walk->pair++;
 545                len -= sgmin;
 546                skip_len += sgmin;
 547                walk->last_sg = sg;
 548                walk->last_sg_len = skip_len;
 549                if (sg_dma_len(sg) == skip_len) {
 550                        sg = sg_next(sg);
 551                        skip_len = 0;
 552                }
 553        }
 554}
 555
 556static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 557{
 558        struct crypto_alg *alg = tfm->__crt_alg;
 559        struct chcr_alg_template *chcr_crypto_alg =
 560                container_of(alg, struct chcr_alg_template, alg.crypto);
 561
 562        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 563}
 564
 565static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
 566{
 567        struct adapter *adap = netdev2adap(dev);
 568        struct sge_uld_txq_info *txq_info =
 569                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 570        struct sge_uld_txq *txq;
 571        int ret = 0;
 572
 573        local_bh_disable();
 574        txq = &txq_info->uldtxq[idx];
 575        spin_lock(&txq->sendq.lock);
 576        if (txq->full)
 577                ret = -1;
 578        spin_unlock(&txq->sendq.lock);
 579        local_bh_enable();
 580        return ret;
 581}
 582
 583static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 584                               struct _key_ctx *key_ctx)
 585{
 586        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
 587                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 588        } else {
 589                memcpy(key_ctx->key,
 590                       ablkctx->key + (ablkctx->enckey_len >> 1),
 591                       ablkctx->enckey_len >> 1);
 592                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
 593                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 594        }
 595        return 0;
 596}
 597
 598static int chcr_hash_ent_in_wr(struct scatterlist *src,
 599                             unsigned int minsg,
 600                             unsigned int space,
 601                             unsigned int srcskip)
 602{
 603        int srclen = 0;
 604        int srcsg = minsg;
 605        int soffset = 0, sless;
 606
 607        if (sg_dma_len(src) == srcskip) {
 608                src = sg_next(src);
 609                srcskip = 0;
 610        }
 611        while (src && space > (sgl_ent_len[srcsg + 1])) {
 612                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
 613                                                        CHCR_SRC_SG_SIZE);
 614                srclen += sless;
 615                soffset += sless;
 616                srcsg++;
 617                if (sg_dma_len(src) == (soffset + srcskip)) {
 618                        src = sg_next(src);
 619                        soffset = 0;
 620                        srcskip = 0;
 621                }
 622        }
 623        return srclen;
 624}
 625
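     /*
      * Determine how many payload bytes of @src/@dst can be carried by a
      * single work request with @space bytes available, charging
      * sgl_ent_len[]/dsgl_ent_len[] for every source and destination entry.
      * Returns the smaller of the usable source and destination lengths.
      */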
 626static int chcr_sg_ent_in_wr(struct scatterlist *src,
 627                             struct scatterlist *dst,
 628                             unsigned int minsg,
 629                             unsigned int space,
 630                             unsigned int srcskip,
 631                             unsigned int dstskip)
 632{
 633        int srclen = 0, dstlen = 0;
 634        int srcsg = minsg, dstsg = minsg;
 635        int offset = 0, soffset = 0, less, sless = 0;
 636
 637        if (sg_dma_len(src) == srcskip) {
 638                src = sg_next(src);
 639                srcskip = 0;
 640        }
 641
 642        if (sg_dma_len(dst) == dstskip) {
 643                dst = sg_next(dst);
 644                dstskip = 0;
 645        }
 646
 647        while (src && dst &&
 648               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
 649                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
 650                                CHCR_SRC_SG_SIZE);
 651                srclen += sless;
 652                srcsg++;
 653                offset = 0;
 654                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
 655                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
 656                        if (srclen <= dstlen)
 657                                break;
 658                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
 659                                     dstskip, CHCR_DST_SG_SIZE);
 660                        dstlen += less;
 661                        offset += less;
 662                        if ((offset + dstskip) == sg_dma_len(dst)) {
 663                                dst = sg_next(dst);
 664                                offset = 0;
 665                        }
 666                        dstsg++;
 667                        dstskip = 0;
 668                }
 669                soffset += sless;
 670                if ((soffset + srcskip) == sg_dma_len(src)) {
 671                        src = sg_next(src);
 672                        srcskip = 0;
 673                        soffset = 0;
 674                }
 675
 676        }
 677        return min(srclen, dstlen);
 678}
 679
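     /*
      * Run the request synchronously on the software fallback skcipher;
      * used when the hardware path cannot make further progress.
      */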
 680static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
 681                                u32 flags,
 682                                struct scatterlist *src,
 683                                struct scatterlist *dst,
 684                                unsigned int nbytes,
 685                                u8 *iv,
 686                                unsigned short op_type)
 687{
 688        int err;
 689
 690        SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
 691        skcipher_request_set_tfm(subreq, cipher);
 692        skcipher_request_set_callback(subreq, flags, NULL, NULL);
 693        skcipher_request_set_crypt(subreq, src, dst,
 694                                   nbytes, iv);
 695
 696        err = op_type ? crypto_skcipher_decrypt(subreq) :
 697                crypto_skcipher_encrypt(subreq);
 698        skcipher_request_zero(subreq);
 699
 700        return err;
 701
 702}
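     /*
      * Fill in the FW_CRYPTO_LOOKASIDE work-request and ULPTX headers that
      * are common to the driver's requests: lengths in 16-byte units,
      * completion cookie, response queue selection and whether the payload
      * is carried as immediate data or via an SGL.
      */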
 703static inline void create_wreq(struct chcr_context *ctx,
 704                               struct chcr_wr *chcr_req,
 705                               struct crypto_async_request *req,
 706                               unsigned int imm,
 707                               int hash_sz,
 708                               unsigned int len16,
 709                               unsigned int sc_len,
 710                               unsigned int lcb)
 711{
 712        struct uld_ctx *u_ctx = ULD_CTX(ctx);
 713        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
 714
 715
 716        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
 717        chcr_req->wreq.pld_size_hash_size =
 718                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
 719        chcr_req->wreq.len16_pkd =
 720                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
 721        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 722        chcr_req->wreq.rx_chid_to_rx_q_id =
 723                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 724                                !!lcb, ctx->tx_qidx);
 725
 726        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
 727                                                       qid);
 728        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 729                                     ((sizeof(chcr_req->wreq)) >> 4)));
 730
 731        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
 732        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 733                                           sizeof(chcr_req->key_ctx) + sc_len);
 734}
 735
  736/**
  737 *      create_cipher_wr - form the WR for cipher operations
  738 *      @wrparam: cipher work-request parameters: the cipher request, the
  739 *      number of bytes to process in this WR, and the ingress qid where
  740 *      the response of this WR should be received (the operation comes
  741 *      from the request context).
  742 */
 743static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 744{
 745        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
 746        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 747        struct sk_buff *skb = NULL;
 748        struct chcr_wr *chcr_req;
 749        struct cpl_rx_phys_dsgl *phys_cpl;
 750        struct ulptx_sgl *ulptx;
 751        struct chcr_blkcipher_req_ctx *reqctx =
 752                ablkcipher_request_ctx(wrparam->req);
 753        unsigned int temp = 0, transhdr_len, dst_size;
 754        int error;
 755        int nents;
 756        unsigned int kctx_len;
 757        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 758                        GFP_KERNEL : GFP_ATOMIC;
 759        struct adapter *adap = padap(c_ctx(tfm)->dev);
 760
 761        nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
 762                              reqctx->dst_ofst);
 763        dst_size = get_space_for_phys_dsgl(nents + 1);
 764        kctx_len = roundup(ablkctx->enckey_len, 16);
 765        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
 766        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
 767                                  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
 768        temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
 769                                     (sgl_len(nents + MIN_CIPHER_SG) * 8);
 770        transhdr_len += temp;
 771        transhdr_len = roundup(transhdr_len, 16);
 772        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 773        if (!skb) {
 774                error = -ENOMEM;
 775                goto err;
 776        }
 777        chcr_req = __skb_put_zero(skb, transhdr_len);
 778        chcr_req->sec_cpl.op_ivinsrtofst =
 779                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
 780
 781        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
 782        chcr_req->sec_cpl.aadstart_cipherstop_hi =
 783                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
 784
 785        chcr_req->sec_cpl.cipherstop_lo_authinsert =
 786                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
 787        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 788                                                         ablkctx->ciph_mode,
 789                                                         0, 0, IV >> 1);
 790        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 791                                                          0, 0, dst_size);
 792
 793        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 794        if ((reqctx->op == CHCR_DECRYPT_OP) &&
 795            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 796               CRYPTO_ALG_SUB_TYPE_CTR)) &&
 797            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
 798               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 799                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 800        } else {
 801                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
 802                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 803                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
 804                               ablkctx->enckey_len);
 805                } else {
 806                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
 807                               (ablkctx->enckey_len >> 1),
 808                               ablkctx->enckey_len >> 1);
 809                        memcpy(chcr_req->key_ctx.key +
 810                               (ablkctx->enckey_len >> 1),
 811                               ablkctx->key,
 812                               ablkctx->enckey_len >> 1);
 813                }
 814        }
 815        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 816        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 817        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
 818        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
 819
 820        atomic_inc(&adap->chcr_stats.cipher_rqst);
 821        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
 822                +(reqctx->imm ? (IV + wrparam->bytes) : 0);
 823        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
 824                    transhdr_len, temp,
 825                        ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 826        reqctx->skb = skb;
 827
 828        if (reqctx->op && (ablkctx->ciph_mode ==
 829                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
 830                sg_pcopy_to_buffer(wrparam->req->src,
 831                        sg_nents(wrparam->req->src), wrparam->req->info, 16,
 832                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 833
 834        return skb;
 835err:
 836        return ERR_PTR(error);
 837}
 838
 839static inline int chcr_keyctx_ck_size(unsigned int keylen)
 840{
 841        int ck_size = 0;
 842
 843        if (keylen == AES_KEYSIZE_128)
 844                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 845        else if (keylen == AES_KEYSIZE_192)
 846                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
 847        else if (keylen == AES_KEYSIZE_256)
 848                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
 849        else
 850                ck_size = 0;
 851
 852        return ck_size;
 853}
 854static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
 855                                       const u8 *key,
 856                                       unsigned int keylen)
 857{
 858        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
 859        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 860        int err = 0;
 861
 862        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
 863        crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
 864                                  CRYPTO_TFM_REQ_MASK);
 865        err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 866        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 867        tfm->crt_flags |=
 868                crypto_skcipher_get_flags(ablkctx->sw_cipher) &
 869                CRYPTO_TFM_RES_MASK;
 870        return err;
 871}
 872
 873static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 874                               const u8 *key,
 875                               unsigned int keylen)
 876{
 877        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 878        unsigned int ck_size, context_size;
 879        u16 alignment = 0;
 880        int err;
 881
 882        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 883        if (err)
 884                goto badkey_err;
 885
 886        ck_size = chcr_keyctx_ck_size(keylen);
 887        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 888        memcpy(ablkctx->key, key, keylen);
 889        ablkctx->enckey_len = keylen;
 890        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 891        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 892                        keylen + alignment) >> 4;
 893
 894        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 895                                                0, 0, context_size);
 896        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 897        return 0;
 898badkey_err:
 899        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 900        ablkctx->enckey_len = 0;
 901
 902        return err;
 903}
 904
 905static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 906                                   const u8 *key,
 907                                   unsigned int keylen)
 908{
 909        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 910        unsigned int ck_size, context_size;
 911        u16 alignment = 0;
 912        int err;
 913
 914        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 915        if (err)
 916                goto badkey_err;
 917        ck_size = chcr_keyctx_ck_size(keylen);
 918        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 919        memcpy(ablkctx->key, key, keylen);
 920        ablkctx->enckey_len = keylen;
 921        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 922                        keylen + alignment) >> 4;
 923
 924        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 925                                                0, 0, context_size);
 926        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 927
 928        return 0;
 929badkey_err:
 930        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 931        ablkctx->enckey_len = 0;
 932
 933        return err;
 934}
 935
 936static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 937                                   const u8 *key,
 938                                   unsigned int keylen)
 939{
 940        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 941        unsigned int ck_size, context_size;
 942        u16 alignment = 0;
 943        int err;
 944
 945        if (keylen < CTR_RFC3686_NONCE_SIZE)
 946                return -EINVAL;
 947        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
 948               CTR_RFC3686_NONCE_SIZE);
 949
 950        keylen -= CTR_RFC3686_NONCE_SIZE;
 951        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
 952        if (err)
 953                goto badkey_err;
 954
 955        ck_size = chcr_keyctx_ck_size(keylen);
 956        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
 957        memcpy(ablkctx->key, key, keylen);
 958        ablkctx->enckey_len = keylen;
 959        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 960                        keylen + alignment) >> 4;
 961
 962        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
 963                                                0, 0, context_size);
 964        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 965
 966        return 0;
 967badkey_err:
 968        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 969        ablkctx->enckey_len = 0;
 970
 971        return err;
 972}
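     /*
      * Copy @srciv to @dstiv and add @add to it, treating the IV as a
      * 128-bit big-endian counter and propagating the carry.
      */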
 973static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
 974{
 975        unsigned int size = AES_BLOCK_SIZE;
 976        __be32 *b = (__be32 *)(dstiv + size);
 977        u32 c, prev;
 978
 979        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
 980        for (; size >= 4; size -= 4) {
 981                prev = be32_to_cpu(*--b);
 982                c = prev + add;
 983                *b = cpu_to_be32(c);
 984                if (prev < c)
 985                        break;
 986                add = 1;
 987        }
 988
 989}
 990
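     /*
      * Limit @bytes so that the low 32-bit word of the big-endian CTR
      * counter does not wrap within a single work request; the remainder is
      * sent in a follow-up request after the IV has been updated.
      */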
 991static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 992{
 993        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
 994        u64 c;
 995        u32 temp = be32_to_cpu(*--b);
 996
 997        temp = ~temp;
  998        c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
 999        if ((bytes / AES_BLOCK_SIZE) > c)
1000                bytes = c * AES_BLOCK_SIZE;
1001        return bytes;
1002}
1003
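     /*
      * Recompute the IV/tweak for the next chunk: multiply the returned
      * (encrypted) IV by x in GF(2^128) once per AES block already
      * processed (x^8 steps are batched), and for intermediate chunks
      * decrypt it with the tweak half of the key.
      */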
1004static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1005                             u32 isfinal)
1006{
1007        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1008        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1009        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1010        struct crypto_cipher *cipher;
1011        int ret, i;
1012        u8 *key;
1013        unsigned int keylen;
1014        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1015        int round8 = round / 8;
1016
1017        cipher = ablkctx->aes_generic;
1018        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1019
1020        keylen = ablkctx->enckey_len / 2;
1021        key = ablkctx->key + keylen;
1022        ret = crypto_cipher_setkey(cipher, key, keylen);
1023        if (ret)
1024                goto out;
1025        /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/
1026        for (i = 0; i < round8; i++)
1027                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1028
1029        for (i = 0; i < (round % 8); i++)
1030                gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1031
1032        if (!isfinal)
1033                crypto_cipher_decrypt_one(cipher, iv, iv);
1034out:
1035        return ret;
1036}
1037
1038static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1039                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1040{
1041        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1042        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1043        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1044        int ret = 0;
1045
1046        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1047                ctr_add_iv(iv, req->info, (reqctx->processed /
1048                           AES_BLOCK_SIZE));
1049        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1050                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1051                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1052                                                AES_BLOCK_SIZE) + 1);
1053        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1054                ret = chcr_update_tweak(req, iv, 0);
1055        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1056                if (reqctx->op)
1057                        /*Updated before sending last WR*/
1058                        memcpy(iv, req->info, AES_BLOCK_SIZE);
1059                else
1060                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1061        }
1062
1063        return ret;
1064
1065}
1066
 1067/* A separate function is needed for the final IV because in RFC3686 the
 1068 * counter starts from 1 and the IV buffer is only 8 bytes, which remains
 1069 * constant for subsequent update requests.
 1070 */
1071
1072static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1073                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1074{
1075        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1076        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1077        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1078        int ret = 0;
1079
1080        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1081                ctr_add_iv(iv, req->info, (reqctx->processed /
1082                           AES_BLOCK_SIZE));
1083        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1084                ret = chcr_update_tweak(req, iv, 1);
1085        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1086                /*Already updated for Decrypt*/
1087                if (!reqctx->op)
1088                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1089
1090        }
1091        return ret;
1092
1093}
1094
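     /*
      * Completion handler for cipher work requests: finish the request when
      * all bytes have been processed, otherwise update the IV and issue the
      * next work request for the remaining data, falling back to the
      * software cipher if no further progress can be made.
      */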
1095static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1096                                   unsigned char *input, int err)
1097{
1098        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1099        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1100        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1101        struct sk_buff *skb;
1102        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1103        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1104        struct  cipher_wr_param wrparam;
1105        int bytes;
1106
1107        if (err)
1108                goto unmap;
1109        if (req->nbytes == reqctx->processed) {
1110                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1111                                      req);
1112                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1113                goto complete;
1114        }
1115
1116        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1117                                            c_ctx(tfm)->tx_qidx))) {
1118                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1119                        err = -EBUSY;
1120                        goto unmap;
1121                }
1122
1123        }
1124        if (!reqctx->imm) {
1125                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
1126                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
1127                                          reqctx->src_ofst, reqctx->dst_ofst);
1128                if ((bytes + reqctx->processed) >= req->nbytes)
1129                        bytes  = req->nbytes - reqctx->processed;
1130                else
1131                        bytes = rounddown(bytes, 16);
1132        } else {
 1133                /* CTR mode counter overflow */
1134                bytes  = req->nbytes - reqctx->processed;
1135        }
1136        dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1137                                reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
1138        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1139        dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1140                                   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
1141        if (err)
1142                goto unmap;
1143
1144        if (unlikely(bytes == 0)) {
1145                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1146                                      req);
1147                err = chcr_cipher_fallback(ablkctx->sw_cipher,
1148                                     req->base.flags,
1149                                     req->src,
1150                                     req->dst,
1151                                     req->nbytes,
1152                                     req->info,
1153                                     reqctx->op);
1154                goto complete;
1155        }
1156
1157        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1158            CRYPTO_ALG_SUB_TYPE_CTR)
1159                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1160        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1161        wrparam.req = req;
1162        wrparam.bytes = bytes;
1163        skb = create_cipher_wr(&wrparam);
1164        if (IS_ERR(skb)) {
1165                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1166                err = PTR_ERR(skb);
1167                goto unmap;
1168        }
1169        skb->dev = u_ctx->lldi.ports[0];
1170        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1171        chcr_send_wr(skb);
1172        reqctx->last_req_len = bytes;
1173        reqctx->processed += bytes;
1174        return 0;
1175unmap:
1176        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1177complete:
1178        req->base.complete(&req->base, err);
1179        return err;
1180}
1181
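     /*
      * Validate and DMA-map the request, choose between immediate data and
      * SGL mode, set up the per-request IV (including the RFC3686
      * nonce/IV/counter layout) and build the first cipher work request.
      */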
1182static int process_cipher(struct ablkcipher_request *req,
1183                                  unsigned short qid,
1184                                  struct sk_buff **skb,
1185                                  unsigned short op_type)
1186{
1187        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1188        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1189        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1190        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1191        struct  cipher_wr_param wrparam;
1192        int bytes, err = -EINVAL;
1193
1194        reqctx->processed = 0;
1195        if (!req->info)
1196                goto error;
1197        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1198            (req->nbytes == 0) ||
1199            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1200                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1201                       ablkctx->enckey_len, req->nbytes, ivsize);
1202                goto error;
1203        }
1204        chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1205        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1206                                            AES_MIN_KEY_SIZE +
1207                                            sizeof(struct cpl_rx_phys_dsgl) +
1208                                        /*Min dsgl size*/
1209                                            32))) {
 1210                /* Can be sent as immediate data */
1211                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1212
1213                dnents = sg_nents_xlen(req->dst, req->nbytes,
1214                                       CHCR_DST_SG_SIZE, 0);
1215                dnents += 1; // IV
1216                phys_dsgl = get_space_for_phys_dsgl(dnents);
1217                kctx_len = roundup(ablkctx->enckey_len, 16);
1218                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1219                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1220                        SGE_MAX_WR_LEN;
1221                bytes = IV + req->nbytes;
1222
1223        } else {
1224                reqctx->imm = 0;
1225        }
1226
1227        if (!reqctx->imm) {
1228                bytes = chcr_sg_ent_in_wr(req->src, req->dst,
1229                                          MIN_CIPHER_SG,
1230                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
1231                                          0, 0);
1232                if ((bytes + reqctx->processed) >= req->nbytes)
1233                        bytes  = req->nbytes - reqctx->processed;
1234                else
1235                        bytes = rounddown(bytes, 16);
1236        } else {
1237                bytes = req->nbytes;
1238        }
1239        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1240            CRYPTO_ALG_SUB_TYPE_CTR) {
1241                bytes = adjust_ctr_overflow(req->info, bytes);
1242        }
1243        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1244            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1245                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1246                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1247                                CTR_RFC3686_IV_SIZE);
1248
1249                /* initialize counter portion of counter block */
1250                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1251                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1252
1253        } else {
1254
1255                memcpy(reqctx->iv, req->info, IV);
1256        }
1257        if (unlikely(bytes == 0)) {
1258                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1259                                      req);
1260                err = chcr_cipher_fallback(ablkctx->sw_cipher,
1261                                           req->base.flags,
1262                                           req->src,
1263                                           req->dst,
1264                                           req->nbytes,
1265                                           reqctx->iv,
1266                                           op_type);
1267                goto error;
1268        }
1269        reqctx->op = op_type;
1270        reqctx->srcsg = req->src;
1271        reqctx->dstsg = req->dst;
1272        reqctx->src_ofst = 0;
1273        reqctx->dst_ofst = 0;
1274        wrparam.qid = qid;
1275        wrparam.req = req;
1276        wrparam.bytes = bytes;
1277        *skb = create_cipher_wr(&wrparam);
1278        if (IS_ERR(*skb)) {
1279                err = PTR_ERR(*skb);
1280                goto unmap;
1281        }
1282        reqctx->processed = bytes;
1283        reqctx->last_req_len = bytes;
1284
1285        return 0;
1286unmap:
1287        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1288error:
1289        return err;
1290}
1291
1292static int chcr_aes_encrypt(struct ablkcipher_request *req)
1293{
1294        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1295        struct sk_buff *skb = NULL;
1296        int err;
1297        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1298
1299        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1300                                            c_ctx(tfm)->tx_qidx))) {
1301                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1302                        return -EBUSY;
1303        }
1304
1305        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1306                             &skb, CHCR_ENCRYPT_OP);
1307        if (err || !skb)
1308                return  err;
1309        skb->dev = u_ctx->lldi.ports[0];
1310        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1311        chcr_send_wr(skb);
1312        return -EINPROGRESS;
1313}
1314
1315static int chcr_aes_decrypt(struct ablkcipher_request *req)
1316{
1317        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1318        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1319        struct sk_buff *skb = NULL;
1320        int err;
1321
1322        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1323                                            c_ctx(tfm)->tx_qidx))) {
1324                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1325                        return -EBUSY;
1326        }
1327
 1328        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
 1329                             &skb, CHCR_DECRYPT_OP);
1330        if (err || !skb)
1331                return err;
1332        skb->dev = u_ctx->lldi.ports[0];
1333        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1334        chcr_send_wr(skb);
1335        return -EINPROGRESS;
1336}
1337
1338static int chcr_device_init(struct chcr_context *ctx)
1339{
1340        struct uld_ctx *u_ctx = NULL;
1341        struct adapter *adap;
1342        unsigned int id;
1343        int txq_perchan, txq_idx, ntxq;
1344        int err = 0, rxq_perchan, rxq_idx;
1345
1346        id = smp_processor_id();
1347        if (!ctx->dev) {
1348                u_ctx = assign_chcr_device();
 1349                if (!u_ctx) {
 1350                        pr_err("chcr device assignment fails\n");
                             err = -ENXIO;
 1351                        goto out;
 1352                }
1353                ctx->dev = u_ctx->dev;
1354                adap = padap(ctx->dev);
1355                ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1356                                    adap->vres.ncrypto_fc);
1357                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1358                txq_perchan = ntxq / u_ctx->lldi.nchan;
1359                rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1360                rxq_idx += id % rxq_perchan;
1361                txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1362                txq_idx += id % txq_perchan;
1363                spin_lock(&ctx->dev->lock_chcr_dev);
1364                ctx->rx_qidx = rxq_idx;
1365                ctx->tx_qidx = txq_idx;
1366                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1367                ctx->dev->rx_channel_id = 0;
1368                spin_unlock(&ctx->dev->lock_chcr_dev);
1369        }
1370out:
1371        return err;
1372}
1373
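    /**
     *      chcr_cra_init - Cipher transform initialisation
     *      @tfm - crypto transform
     *
     *      Allocates the software fallback skcipher (and, for XTS, the
     *      aes-generic cipher used for the tweak) and binds the context
     *      to a device.
     */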
1374static int chcr_cra_init(struct crypto_tfm *tfm)
1375{
1376        struct crypto_alg *alg = tfm->__crt_alg;
1377        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1378        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1379
1380        ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1381                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1382        if (IS_ERR(ablkctx->sw_cipher)) {
1383                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1384                return PTR_ERR(ablkctx->sw_cipher);
1385        }
1386
1387        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1388                /* Software AES cipher used to compute the XTS tweak */
1389                ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1390                if (IS_ERR(ablkctx->aes_generic)) {
1391                        pr_err("failed to allocate aes cipher for tweak\n");
1392                        return PTR_ERR(ablkctx->aes_generic);
1393                }
1394        } else
1395                ablkctx->aes_generic = NULL;
1396
1397        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1398        return chcr_device_init(crypto_tfm_ctx(tfm));
1399}
1400
1401static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1402{
1403        struct crypto_alg *alg = tfm->__crt_alg;
1404        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1405        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1406
1407        /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1408         * cannot be used as the fallback in chcr_handle_cipher_resp.
1409         */
1410        ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1411                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1412        if (IS_ERR(ablkctx->sw_cipher)) {
1413                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1414                return PTR_ERR(ablkctx->sw_cipher);
1415        }
1416        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1417        return chcr_device_init(crypto_tfm_ctx(tfm));
1418}
1419
1420
1421static void chcr_cra_exit(struct crypto_tfm *tfm)
1422{
1423        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1424        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1425
1426        crypto_free_skcipher(ablkctx->sw_cipher);
1427        if (ablkctx->aes_generic)
1428                crypto_free_cipher(ablkctx->aes_generic);
1429}
1430
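    /**
     *      get_alg_config - Map a digest size to hardware hash parameters
     *      @params - MAC key size, auth mode and partial-hash result size
     *      @auth_size - digest size in bytes
     */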
1431static int get_alg_config(struct algo_param *params,
1432                          unsigned int auth_size)
1433{
1434        switch (auth_size) {
1435        case SHA1_DIGEST_SIZE:
1436                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1437                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1438                params->result_size = SHA1_DIGEST_SIZE;
1439                break;
1440        case SHA224_DIGEST_SIZE:
1441                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1442                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1443                params->result_size = SHA256_DIGEST_SIZE;
1444                break;
1445        case SHA256_DIGEST_SIZE:
1446                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1447                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1448                params->result_size = SHA256_DIGEST_SIZE;
1449                break;
1450        case SHA384_DIGEST_SIZE:
1451                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1452                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1453                params->result_size = SHA512_DIGEST_SIZE;
1454                break;
1455        case SHA512_DIGEST_SIZE:
1456                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1457                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1458                params->result_size = SHA512_DIGEST_SIZE;
1459                break;
1460        default:
1461                pr_err("chcr : ERROR, unsupported digest size\n");
1462                return -EINVAL;
1463        }
1464        return 0;
1465}
1466
1467static inline void chcr_free_shash(struct crypto_shash *base_hash)
1468{
1469        crypto_free_shash(base_hash);
1470}
1471
1472/**
1473 *      create_hash_wr - Create hash work request
1474 *      @req - Hash request base
1475 */
1476static struct sk_buff *create_hash_wr(struct ahash_request *req,
1477                                      struct hash_wr_param *param)
1478{
1479        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1480        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1481        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1482        struct sk_buff *skb = NULL;
1483        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1484        struct chcr_wr *chcr_req;
1485        struct ulptx_sgl *ulptx;
1486        unsigned int nents = 0, transhdr_len;
1487        unsigned int temp = 0;
1488        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1489                GFP_ATOMIC;
1490        struct adapter *adap = padap(h_ctx(tfm)->dev);
1491        int error = 0;
1492
1493        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1494        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1495                                param->sg_len) <= SGE_MAX_WR_LEN;
1496        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1497                      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1498        nents += param->bfr_len ? 1 : 0;
1499        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1500                                param->sg_len, 16) : (sgl_len(nents) * 8);
1501        transhdr_len = roundup(transhdr_len, 16);
1502
1503        skb = alloc_skb(transhdr_len, flags);
1504        if (!skb)
1505                return ERR_PTR(-ENOMEM);
1506        chcr_req = __skb_put_zero(skb, transhdr_len);
1507
1508        chcr_req->sec_cpl.op_ivinsrtofst =
1509                FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1510        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1511
1512        chcr_req->sec_cpl.aadstart_cipherstop_hi =
1513                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1514        chcr_req->sec_cpl.cipherstop_lo_authinsert =
1515                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1516        chcr_req->sec_cpl.seqno_numivs =
1517                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1518                                         param->opad_needed, 0);
1519
1520        chcr_req->sec_cpl.ivgen_hdrlen =
1521                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1522
1523        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1524               param->alg_prm.result_size);
1525
1526        if (param->opad_needed)
1527                memcpy(chcr_req->key_ctx.key +
1528                       ((param->alg_prm.result_size <= 32) ? 32 :
1529                        CHCR_HASH_MAX_DIGEST_SIZE),
1530                       hmacctx->opad, param->alg_prm.result_size);
1531
1532        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1533                                            param->alg_prm.mk_size, 0,
1534                                            param->opad_needed,
1535                                            ((param->kctx_len +
1536                                             sizeof(chcr_req->key_ctx)) >> 4));
1537        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1538        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1539                                     DUMMY_BYTES);
1540        if (param->bfr_len != 0) {
1541                req_ctx->hctx_wr.dma_addr =
1542                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1543                                       param->bfr_len, DMA_TO_DEVICE);
1544                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1545                                      req_ctx->hctx_wr.dma_addr)) {
1546                        error = -ENOMEM;
1547                        goto err;
1548                }
1549                req_ctx->hctx_wr.dma_len = param->bfr_len;
1550        } else {
1551                req_ctx->hctx_wr.dma_addr = 0;
1552        }
1553        chcr_add_hash_src_ent(req, ulptx, param);
1554        /* Request up to max WR size */
1555        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1556                                (param->sg_len + param->bfr_len) : 0);
1557        atomic_inc(&adap->chcr_stats.digest_rqst);
1558        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1559                    param->hash_size, transhdr_len,
1560                    temp, 0);
1561        req_ctx->hctx_wr.skb = skb;
1562        return skb;
1563err:
1564        kfree_skb(skb);
1565        return ERR_PTR(error);
1566}
1567
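    /**
     *      chcr_ahash_update - Handle an ahash update request
     *      @req - Hash request
     *
     *      Data shorter than a block is buffered in the request context;
     *      otherwise the source is DMA mapped and a partial-hash work
     *      request is sent, keeping any trailing remainder buffered.
     */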
1568static int chcr_ahash_update(struct ahash_request *req)
1569{
1570        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1571        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1572        struct uld_ctx *u_ctx = NULL;
1573        struct sk_buff *skb;
1574        u8 remainder = 0, bs;
1575        unsigned int nbytes = req->nbytes;
1576        struct hash_wr_param params;
1577        int error;
1578
1579        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1580        u_ctx = ULD_CTX(h_ctx(rtfm));
1581        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1582                                            h_ctx(rtfm)->tx_qidx))) {
1583                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1584                        return -EBUSY;
1585        }
1586
1587        if (nbytes + req_ctx->reqlen >= bs) {
1588                remainder = (nbytes + req_ctx->reqlen) % bs;
1589                nbytes = nbytes + req_ctx->reqlen - remainder;
1590        } else {
1591                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1592                                   + req_ctx->reqlen, nbytes, 0);
1593                req_ctx->reqlen += nbytes;
1594                return 0;
1595        }
1596        chcr_init_hctx_per_wr(req_ctx);
1597        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1598        if (error)
1599                return -ENOMEM;
1600        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1601        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1602        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1603                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1604        if (params.sg_len > req->nbytes)
1605                params.sg_len = req->nbytes;
1606        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1607                        req_ctx->reqlen;
1608        params.opad_needed = 0;
1609        params.more = 1;
1610        params.last = 0;
1611        params.bfr_len = req_ctx->reqlen;
1612        params.scmd1 = 0;
1613        req_ctx->hctx_wr.srcsg = req->src;
1614
1615        params.hash_size = params.alg_prm.result_size;
1616        req_ctx->data_len += params.sg_len + params.bfr_len;
1617        skb = create_hash_wr(req, &params);
1618        if (IS_ERR(skb)) {
1619                error = PTR_ERR(skb);
1620                goto unmap;
1621        }
1622
1623        req_ctx->hctx_wr.processed += params.sg_len;
1624        if (remainder) {
1625                /* Swap buffers */
1626                swap(req_ctx->reqbfr, req_ctx->skbfr);
1627                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1628                                   req_ctx->reqbfr, remainder, req->nbytes -
1629                                   remainder);
1630        }
1631        req_ctx->reqlen = remainder;
1632        skb->dev = u_ctx->lldi.ports[0];
1633        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1634        chcr_send_wr(skb);
1635
1636        return -EINPROGRESS;
1637unmap:
1638        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1639        return error;
1640}
1641
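    /**
     *      create_last_hash_block - Build the final padding block
     *      @bfr_ptr - block buffer
     *      @bs - block size
     *      @scmd1 - total message length in bytes
     *
     *      Writes the 0x80 terminator and the bit length at the end of
     *      the block, used when no data is left for the final request.
     */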
1642static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1643{
1644        memset(bfr_ptr, 0, bs);
1645        *bfr_ptr = 0x80;
1646        if (bs == 64)
1647                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1648        else
1649                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1650}
1651
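    /**
     *      chcr_ahash_final - Issue the final hash work request
     *      @req - Hash request
     */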
1652static int chcr_ahash_final(struct ahash_request *req)
1653{
1654        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1655        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1656        struct hash_wr_param params;
1657        struct sk_buff *skb;
1658        struct uld_ctx *u_ctx = NULL;
1659        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1660
1661        chcr_init_hctx_per_wr(req_ctx);
1662        u_ctx = ULD_CTX(h_ctx(rtfm));
1663        if (is_hmac(crypto_ahash_tfm(rtfm)))
1664                params.opad_needed = 1;
1665        else
1666                params.opad_needed = 0;
1667        params.sg_len = 0;
1668        req_ctx->hctx_wr.isfinal = 1;
1669        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1670        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1671        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1672                params.opad_needed = 1;
1673                params.kctx_len *= 2;
1674        } else {
1675                params.opad_needed = 0;
1676        }
1677
1678        req_ctx->hctx_wr.result = 1;
1679        params.bfr_len = req_ctx->reqlen;
1680        req_ctx->data_len += params.bfr_len + params.sg_len;
1681        req_ctx->hctx_wr.srcsg = req->src;
1682        if (req_ctx->reqlen == 0) {
1683                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1684                params.last = 0;
1685                params.more = 1;
1686                params.scmd1 = 0;
1687                params.bfr_len = bs;
1688
1689        } else {
1690                params.scmd1 = req_ctx->data_len;
1691                params.last = 1;
1692                params.more = 0;
1693        }
1694        params.hash_size = crypto_ahash_digestsize(rtfm);
1695        skb = create_hash_wr(req, &params);
1696        if (IS_ERR(skb))
1697                return PTR_ERR(skb);
1698        req_ctx->reqlen = 0;
1699        skb->dev = u_ctx->lldi.ports[0];
1700        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1701        chcr_send_wr(skb);
1702        return -EINPROGRESS;
1703}
1704
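    /**
     *      chcr_ahash_finup - Hash the remaining data and return the digest
     *      @req - Hash request
     *
     *      Sends the buffered and new data in a single work request when
     *      it fits; otherwise a partial request is sent first and the
     *      digest is produced from the response path.
     */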
1705static int chcr_ahash_finup(struct ahash_request *req)
1706{
1707        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1708        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1709        struct uld_ctx *u_ctx = NULL;
1710        struct sk_buff *skb;
1711        struct hash_wr_param params;
1712        u8  bs;
1713        int error;
1714
1715        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1716        u_ctx = ULD_CTX(h_ctx(rtfm));
1717
1718        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1719                                            h_ctx(rtfm)->tx_qidx))) {
1720                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1721                        return -EBUSY;
1722        }
1723        chcr_init_hctx_per_wr(req_ctx);
1724        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1725        if (error)
1726                return -ENOMEM;
1727
1728        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1729        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1730        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1731                params.kctx_len *= 2;
1732                params.opad_needed = 1;
1733        } else {
1734                params.opad_needed = 0;
1735        }
1736
1737        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1738                                    HASH_SPACE_LEFT(params.kctx_len), 0);
1739        if (params.sg_len < req->nbytes) {
1740                if (is_hmac(crypto_ahash_tfm(rtfm))) {
1741                        params.kctx_len /= 2;
1742                        params.opad_needed = 0;
1743                }
1744                params.last = 0;
1745                params.more = 1;
1746                params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1747                                        - req_ctx->reqlen;
1748                params.hash_size = params.alg_prm.result_size;
1749                params.scmd1 = 0;
1750        } else {
1751                params.last = 1;
1752                params.more = 0;
1753                params.sg_len = req->nbytes;
1754                params.hash_size = crypto_ahash_digestsize(rtfm);
1755                params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1756                                params.sg_len;
1757        }
1758        params.bfr_len = req_ctx->reqlen;
1759        req_ctx->data_len += params.bfr_len + params.sg_len;
1760        req_ctx->hctx_wr.result = 1;
1761        req_ctx->hctx_wr.srcsg = req->src;
1762        if ((req_ctx->reqlen + req->nbytes) == 0) {
1763                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1764                params.last = 0;
1765                params.more = 1;
1766                params.scmd1 = 0;
1767                params.bfr_len = bs;
1768        }
1769        skb = create_hash_wr(req, &params);
1770        if (IS_ERR(skb)) {
1771                error = PTR_ERR(skb);
1772                goto unmap;
1773        }
1774        req_ctx->reqlen = 0;
1775        req_ctx->hctx_wr.processed += params.sg_len;
1776        skb->dev = u_ctx->lldi.ports[0];
1777        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1778        chcr_send_wr(skb);
1779
1780        return -EINPROGRESS;
1781unmap:
1782        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1783        return error;
1784}
1785
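    /**
     *      chcr_ahash_digest - Initialise and hash a complete request
     *      @req - Hash request
     */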
1786static int chcr_ahash_digest(struct ahash_request *req)
1787{
1788        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1789        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1790        struct uld_ctx *u_ctx = NULL;
1791        struct sk_buff *skb;
1792        struct hash_wr_param params;
1793        u8  bs;
1794        int error;
1795
1796        rtfm->init(req);
1797        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1798
1799        u_ctx = ULD_CTX(h_ctx(rtfm));
1800        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1801                                            h_ctx(rtfm)->tx_qidx))) {
1802                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1803                        return -EBUSY;
1804        }
1805
1806        chcr_init_hctx_per_wr(req_ctx);
1807        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1808        if (error)
1809                return -ENOMEM;
1810
1811        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1812        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1813        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1814                params.kctx_len *= 2;
1815                params.opad_needed = 1;
1816        } else {
1817                params.opad_needed = 0;
1818        }
1819        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1820                                HASH_SPACE_LEFT(params.kctx_len), 0);
1821        if (params.sg_len < req->nbytes) {
1822                if (is_hmac(crypto_ahash_tfm(rtfm))) {
1823                        params.kctx_len /= 2;
1824                        params.opad_needed = 0;
1825                }
1826                params.last = 0;
1827                params.more = 1;
1828                params.scmd1 = 0;
1829                params.sg_len = rounddown(params.sg_len, bs);
1830                params.hash_size = params.alg_prm.result_size;
1831        } else {
1832                params.sg_len = req->nbytes;
1833                params.hash_size = crypto_ahash_digestsize(rtfm);
1834                params.last = 1;
1835                params.more = 0;
1836                params.scmd1 = req->nbytes + req_ctx->data_len;
1837
1838        }
1839        params.bfr_len = 0;
1840        req_ctx->hctx_wr.result = 1;
1841        req_ctx->hctx_wr.srcsg = req->src;
1842        req_ctx->data_len += params.bfr_len + params.sg_len;
1843
1844        if (req->nbytes == 0) {
1845                create_last_hash_block(req_ctx->reqbfr, bs, 0);
1846                params.more = 1;
1847                params.bfr_len = bs;
1848        }
1849
1850        skb = create_hash_wr(req, &params);
1851        if (IS_ERR(skb)) {
1852                error = PTR_ERR(skb);
1853                goto unmap;
1854        }
1855        req_ctx->hctx_wr.processed += params.sg_len;
1856        skb->dev = u_ctx->lldi.ports[0];
1857        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1858        chcr_send_wr(skb);
1859        return -EINPROGRESS;
1860unmap:
1861        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1862        return error;
1863}
1864
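    /**
     *      chcr_ahash_continue - Send the next work request of a large hash
     *      @req - Hash request
     *
     *      Called from the response path when the source data spans more
     *      than one work request.
     */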
1865static int chcr_ahash_continue(struct ahash_request *req)
1866{
1867        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1868        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1869        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1870        struct uld_ctx *u_ctx = NULL;
1871        struct sk_buff *skb;
1872        struct hash_wr_param params;
1873        u8  bs;
1874        int error;
1875
1876        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1877        u_ctx = ULD_CTX(h_ctx(rtfm));
1878        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1879                                            h_ctx(rtfm)->tx_qidx))) {
1880                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1881                        return -EBUSY;
1882        }
1883        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1884        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1885        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1886                params.kctx_len *= 2;
1887                params.opad_needed = 1;
1888        } else {
1889                params.opad_needed = 0;
1890        }
1891        params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1892                                            HASH_SPACE_LEFT(params.kctx_len),
1893                                            hctx_wr->src_ofst);
1894        if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1895                params.sg_len = req->nbytes - hctx_wr->processed;
1896        if (!hctx_wr->result ||
1897            ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1898                if (is_hmac(crypto_ahash_tfm(rtfm))) {
1899                        params.kctx_len /= 2;
1900                        params.opad_needed = 0;
1901                }
1902                params.last = 0;
1903                params.more = 1;
1904                params.sg_len = rounddown(params.sg_len, bs);
1905                params.hash_size = params.alg_prm.result_size;
1906                params.scmd1 = 0;
1907        } else {
1908                params.last = 1;
1909                params.more = 0;
1910                params.hash_size = crypto_ahash_digestsize(rtfm);
1911                params.scmd1 = reqctx->data_len + params.sg_len;
1912        }
1913        params.bfr_len = 0;
1914        reqctx->data_len += params.sg_len;
1915        skb = create_hash_wr(req, &params);
1916        if (IS_ERR(skb)) {
1917                error = PTR_ERR(skb);
1918                goto err;
1919        }
1920        hctx_wr->processed += params.sg_len;
1921        skb->dev = u_ctx->lldi.ports[0];
1922        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1923        chcr_send_wr(skb);
1924        return 0;
1925err:
1926        return error;
1927}
1928
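    /**
     *      chcr_handle_ahash_resp - Process a hash completion
     *      @req - Hash request
     *      @input - response payload (cpl_fw6_pld followed by the hash)
     *      @err - completion status
     *
     *      Copies the final digest or the partial hash, continues a
     *      multi-WR request if data remains, otherwise unmaps the source
     *      and completes the request.
     */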
1929static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1930                                          unsigned char *input,
1931                                          int err)
1932{
1933        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1934        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1935        int digestsize, updated_digestsize;
1936        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1937        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1938
1939        if (input == NULL)
1940                goto out;
1941        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1942        updated_digestsize = digestsize;
1943        if (digestsize == SHA224_DIGEST_SIZE)
1944                updated_digestsize = SHA256_DIGEST_SIZE;
1945        else if (digestsize == SHA384_DIGEST_SIZE)
1946                updated_digestsize = SHA512_DIGEST_SIZE;
1947
1948        if (hctx_wr->dma_addr) {
1949                dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1950                                 hctx_wr->dma_len, DMA_TO_DEVICE);
1951                hctx_wr->dma_addr = 0;
1952        }
1953        if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1954                                 req->nbytes)) {
1955                if (hctx_wr->result == 1) {
1956                        hctx_wr->result = 0;
1957                        memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1958                               digestsize);
1959                } else {
1960                        memcpy(reqctx->partial_hash,
1961                               input + sizeof(struct cpl_fw6_pld),
1962                               updated_digestsize);
1963
1964                }
1965                goto unmap;
1966        }
1967        memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1968               updated_digestsize);
1969
1970        err = chcr_ahash_continue(req);
1971        if (err)
1972                goto unmap;
1973        return;
1974unmap:
1975        if (hctx_wr->is_sg_map)
1976                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1977
1978
1979out:
1980        req->base.complete(&req->base, err);
1981}
1982
1983/*
1984 *      chcr_handle_resp - Unmap the DMA buffers associated with the request
1985 *      @req: crypto request
1986 */
1987int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1988                         int err)
1989{
1990        struct crypto_tfm *tfm = req->tfm;
1991        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1992        struct adapter *adap = padap(ctx->dev);
1993
1994        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1995        case CRYPTO_ALG_TYPE_AEAD:
1996                chcr_handle_aead_resp(aead_request_cast(req), input, err);
1997                break;
1998
1999        case CRYPTO_ALG_TYPE_ABLKCIPHER:
2000                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2001                                              input, err);
2002                break;
2003
2004        case CRYPTO_ALG_TYPE_AHASH:
2005                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2006        }
2007        atomic_inc(&adap->chcr_stats.complete);
2008        return err;
2009}
2010static int chcr_ahash_export(struct ahash_request *areq, void *out)
2011{
2012        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2013        struct chcr_ahash_req_ctx *state = out;
2014
2015        state->reqlen = req_ctx->reqlen;
2016        state->data_len = req_ctx->data_len;
2017        memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2018        memcpy(state->partial_hash, req_ctx->partial_hash,
2019               CHCR_HASH_MAX_DIGEST_SIZE);
2020        chcr_init_hctx_per_wr(state);
2021        return 0;
2022}
2023
2024static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2025{
2026        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2027        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2028
2029        req_ctx->reqlen = state->reqlen;
2030        req_ctx->data_len = state->data_len;
2031        req_ctx->reqbfr = req_ctx->bfr1;
2032        req_ctx->skbfr = req_ctx->bfr2;
2033        memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2034        memcpy(req_ctx->partial_hash, state->partial_hash,
2035               CHCR_HASH_MAX_DIGEST_SIZE);
2036        chcr_init_hctx_per_wr(req_ctx);
2037        return 0;
2038}
2039
2040static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2041                             unsigned int keylen)
2042{
2043        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2044        unsigned int digestsize = crypto_ahash_digestsize(tfm);
2045        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2046        unsigned int i, err = 0, updated_digestsize;
2047
2048        SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2049
2050        /* Use the key to calculate the ipad and opad. The ipad is sent with
2051         * the first request's data and the opad with the final hash result;
2052         * they are stored in hmacctx->ipad and hmacctx->opad respectively.
2053         */
2054        shash->tfm = hmacctx->base_hash;
2055        shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2056        if (keylen > bs) {
2057                err = crypto_shash_digest(shash, key, keylen,
2058                                          hmacctx->ipad);
2059                if (err)
2060                        goto out;
2061                keylen = digestsize;
2062        } else {
2063                memcpy(hmacctx->ipad, key, keylen);
2064        }
2065        memset(hmacctx->ipad + keylen, 0, bs - keylen);
2066        memcpy(hmacctx->opad, hmacctx->ipad, bs);
2067
2068        for (i = 0; i < bs / sizeof(int); i++) {
2069                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2070                *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2071        }
2072
2073        updated_digestsize = digestsize;
2074        if (digestsize == SHA224_DIGEST_SIZE)
2075                updated_digestsize = SHA256_DIGEST_SIZE;
2076        else if (digestsize == SHA384_DIGEST_SIZE)
2077                updated_digestsize = SHA512_DIGEST_SIZE;
2078        err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2079                                        hmacctx->ipad, digestsize);
2080        if (err)
2081                goto out;
2082        chcr_change_order(hmacctx->ipad, updated_digestsize);
2083
2084        err = chcr_compute_partial_hash(shash, hmacctx->opad,
2085                                        hmacctx->opad, digestsize);
2086        if (err)
2087                goto out;
2088        chcr_change_order(hmacctx->opad, updated_digestsize);
2089out:
2090        return err;
2091}
2092
2093static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2094                               unsigned int key_len)
2095{
2096        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2097        unsigned short context_size = 0;
2098        int err;
2099
2100        err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2101        if (err)
2102                goto badkey_err;
2103
2104        memcpy(ablkctx->key, key, key_len);
2105        ablkctx->enckey_len = key_len;
2106        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2107        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2108        ablkctx->key_ctx_hdr =
2109                FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2110                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2111                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2112                                 CHCR_KEYCTX_NO_KEY, 1,
2113                                 0, context_size);
2114        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2115        return 0;
2116badkey_err:
2117        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2118        ablkctx->enckey_len = 0;
2119
2120        return err;
2121}
2122
2123static int chcr_sha_init(struct ahash_request *areq)
2124{
2125        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2126        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2127        int digestsize =  crypto_ahash_digestsize(tfm);
2128
2129        req_ctx->data_len = 0;
2130        req_ctx->reqlen = 0;
2131        req_ctx->reqbfr = req_ctx->bfr1;
2132        req_ctx->skbfr = req_ctx->bfr2;
2133        copy_hash_init_values(req_ctx->partial_hash, digestsize);
2134
2135        return 0;
2136}
2137
2138static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2139{
2140        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2141                                 sizeof(struct chcr_ahash_req_ctx));
2142        return chcr_device_init(crypto_tfm_ctx(tfm));
2143}
2144
2145static int chcr_hmac_init(struct ahash_request *areq)
2146{
2147        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2148        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2149        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2150        unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2151        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2152
2153        chcr_sha_init(areq);
2154        req_ctx->data_len = bs;
2155        if (is_hmac(crypto_ahash_tfm(rtfm))) {
2156                if (digestsize == SHA224_DIGEST_SIZE)
2157                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2158                               SHA256_DIGEST_SIZE);
2159                else if (digestsize == SHA384_DIGEST_SIZE)
2160                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2161                               SHA512_DIGEST_SIZE);
2162                else
2163                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2164                               digestsize);
2165        }
2166        return 0;
2167}
2168
2169static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2170{
2171        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2172        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2173        unsigned int digestsize =
2174                crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2175
2176        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2177                                 sizeof(struct chcr_ahash_req_ctx));
2178        hmacctx->base_hash = chcr_alloc_shash(digestsize);
2179        if (IS_ERR(hmacctx->base_hash))
2180                return PTR_ERR(hmacctx->base_hash);
2181        return chcr_device_init(crypto_tfm_ctx(tfm));
2182}
2183
2184static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2185{
2186        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2187        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2188
2189        if (hmacctx->base_hash) {
2190                chcr_free_shash(hmacctx->base_hash);
2191                hmacctx->base_hash = NULL;
2192        }
2193}
2194
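    /**
     *      chcr_aead_common_init - Common AEAD request setup
     *      @req - AEAD request
     *      @op_type - CHCR_ENCRYPT_OP or CHCR_DECRYPT_OP
     *
     *      Validates the key and the request length, DMA maps the request
     *      and counts the source scatterlist entries for AAD and payload.
     */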
2195static int chcr_aead_common_init(struct aead_request *req,
2196                                 unsigned short op_type)
2197{
2198        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2199        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2200        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2201        int error = -EINVAL;
2202        unsigned int authsize = crypto_aead_authsize(tfm);
2203
2204        /* validate key size */
2205        if (aeadctx->enckey_len == 0)
2206                goto err;
2207        if (op_type && req->cryptlen < authsize)
2208                goto err;
2209        error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2210                                  op_type);
2211        if (error) {
2212                error = -ENOMEM;
2213                goto err;
2214        }
2215        reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2216                                          CHCR_SRC_SG_SIZE, 0);
2217        reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2218                                          CHCR_SRC_SG_SIZE, req->assoclen);
2219        return 0;
2220err:
2221        return error;
2222}
2223
2224static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2225                                   int aadmax, int wrlen,
2226                                   unsigned short op_type)
2227{
2228        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2229
2230        if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2231            dst_nents > MAX_DSGL_ENT ||
2232            (req->assoclen > aadmax) ||
2233            (wrlen > SGE_MAX_WR_LEN))
2234                return 1;
2235        return 0;
2236}
2237
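    /**
     *      chcr_aead_fallback - Run an AEAD request on the software cipher
     *      @req - AEAD request
     *      @op_type - CHCR_ENCRYPT_OP or CHCR_DECRYPT_OP
     */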
2238static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2239{
2240        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2241        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2242        struct aead_request *subreq = aead_request_ctx(req);
2243
2244        aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2245        aead_request_set_callback(subreq, req->base.flags,
2246                                  req->base.complete, req->base.data);
2247        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2248                               req->iv);
2249        aead_request_set_ad(subreq, req->assoclen);
2250        return op_type ? crypto_aead_decrypt(subreq) :
2251                crypto_aead_encrypt(subreq);
2252}
2253
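    /**
     *      create_authenc_wr - Create a work request for authenc AEADs
     *      @req - AEAD request
     *      @qid - response queue id
     *      @size - size parameter forwarded to create_wreq()
     *      @op_type - encrypt or decrypt
     */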
2254static struct sk_buff *create_authenc_wr(struct aead_request *req,
2255                                         unsigned short qid,
2256                                         int size,
2257                                         unsigned short op_type)
2258{
2259        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2260        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2261        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2262        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2263        struct sk_buff *skb = NULL;
2264        struct chcr_wr *chcr_req;
2265        struct cpl_rx_phys_dsgl *phys_cpl;
2266        struct ulptx_sgl *ulptx;
2267        unsigned int transhdr_len;
2268        unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2269        unsigned int   kctx_len = 0, dnents;
2270        unsigned int  assoclen = req->assoclen;
2271        unsigned int  authsize = crypto_aead_authsize(tfm);
2272        int error = -EINVAL;
2273        int null = 0;
2274        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2275                GFP_ATOMIC;
2276        struct adapter *adap = padap(a_ctx(tfm)->dev);
2277
2278        if (req->cryptlen == 0)
2279                return NULL;
2280
2281        reqctx->b0_dma = 0;
2282        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2283            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2284                null = 1;
2285                assoclen = 0;
2286        }
2287        error = chcr_aead_common_init(req, op_type);
2288        if (error)
2289                return ERR_PTR(error);
2290        dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2291        dnents += sg_nents_xlen(req->dst, req->cryptlen +
2292                (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2293                req->assoclen);
2294        dnents += MIN_AUTH_SG; /* For IV */
2295
2296        dst_size = get_space_for_phys_dsgl(dnents);
2297        kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2298                - sizeof(chcr_req->key_ctx);
2299        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2300        reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2301                        SGE_MAX_WR_LEN;
2302        temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2303                        : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2304                        + MIN_GCM_SG) * 8);
2305        transhdr_len += temp;
2306        transhdr_len = roundup(transhdr_len, 16);
2307
2308        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2309                                    transhdr_len, op_type)) {
2310                atomic_inc(&adap->chcr_stats.fallback);
2311                chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2312                                    op_type);
2313                return ERR_PTR(chcr_aead_fallback(req, op_type));
2314        }
2315        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2316        if (!skb) {
2317                error = -ENOMEM;
2318                goto err;
2319        }
2320
2321        chcr_req = __skb_put_zero(skb, transhdr_len);
2322
2323        temp  = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2324
2325        /*
2326         * Input order is AAD, IV and Payload, where the IV is included as
2327         * part of the authdata. All other fields are filled according to
2328         * the hardware spec.
2329         */
2330        chcr_req->sec_cpl.op_ivinsrtofst =
2331                FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2332                                       assoclen + 1);
2333        chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2334        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2335                                        assoclen ? 1 : 0, assoclen,
2336                                        assoclen + IV + 1,
2337                                        (temp & 0x1F0) >> 4);
2338        chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2339                                        temp & 0xF,
2340                                        null ? 0 : assoclen + IV + 1,
2341                                        temp, temp);
2342        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2343            subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2344                temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2345        else
2346                temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2347        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2348                                        (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2349                                        temp,
2350                                        actx->auth_mode, aeadctx->hmac_ctrl,
2351                                        IV >> 1);
2352        chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2353                                         0, 0, dst_size);
2354
2355        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2356        if (op_type == CHCR_ENCRYPT_OP ||
2357                subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2358                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2359                memcpy(chcr_req->key_ctx.key, aeadctx->key,
2360                       aeadctx->enckey_len);
2361        else
2362                memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2363                       aeadctx->enckey_len);
2364
2365        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2366               actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2367        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2368            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2369                memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2370                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2371                                CTR_RFC3686_IV_SIZE);
2372                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2373                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2374        } else {
2375                memcpy(reqctx->iv, req->iv, IV);
2376        }
2377        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2378        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2379        chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2380        chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2381        atomic_inc(&adap->chcr_stats.cipher_rqst);
2382        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2383                kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2384        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2385                   transhdr_len, temp, 0);
2386        reqctx->skb = skb;
2387        reqctx->op = op_type;
2388
2389        return skb;
2390err:
2391        chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2392                            op_type);
2393
2394        return ERR_PTR(error);
2395}
2396
2397int chcr_aead_dma_map(struct device *dev,
2398                      struct aead_request *req,
2399                      unsigned short op_type)
2400{
2401        int error;
2402        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2403        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2404        unsigned int authsize = crypto_aead_authsize(tfm);
2405        int dst_size;
2406
2407        dst_size = req->assoclen + req->cryptlen + (op_type ?
2408                                -authsize : authsize);
2409        if (!req->cryptlen || !dst_size)
2410                return 0;
2411        reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2412                                        DMA_BIDIRECTIONAL);
2413        if (dma_mapping_error(dev, reqctx->iv_dma))
2414                return -ENOMEM;
2415
2416        if (req->src == req->dst) {
2417                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2418                                   DMA_BIDIRECTIONAL);
2419                if (!error)
2420                        goto err;
2421        } else {
2422                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2423                                   DMA_TO_DEVICE);
2424                if (!error)
2425                        goto err;
2426                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2427                                   DMA_FROM_DEVICE);
2428                if (!error) {
2429                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
2430                                   DMA_TO_DEVICE);
2431                        goto err;
2432                }
2433        }
2434
2435        return 0;
2436err:
2437        dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2438        return -ENOMEM;
2439}
2440
2441void chcr_aead_dma_unmap(struct device *dev,
2442                         struct aead_request *req,
2443                         unsigned short op_type)
2444{
2445        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2446        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2447        unsigned int authsize = crypto_aead_authsize(tfm);
2448        int dst_size;
2449
2450        dst_size = req->assoclen + req->cryptlen + (op_type ?
2451                                        -authsize : authsize);
2452        if (!req->cryptlen || !dst_size)
2453                return;
2454
2455        dma_unmap_single(dev, reqctx->iv_dma, IV,
2456                                        DMA_BIDIRECTIONAL);
2457        if (req->src == req->dst) {
2458                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2459                                   DMA_BIDIRECTIONAL);
2460        } else {
2461                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2462                                   DMA_TO_DEVICE);
2463                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2464                                   DMA_FROM_DEVICE);
2465        }
2466}
2467
2468void chcr_add_aead_src_ent(struct aead_request *req,
2469                           struct ulptx_sgl *ulptx,
2470                           unsigned int assoclen,
2471                           unsigned short op_type)
2472{
2473        struct ulptx_walk ulp_walk;
2474        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2475
2476        if (reqctx->imm) {
2477                u8 *buf = (u8 *)ulptx;
2478
2479                if (reqctx->b0_dma) {
2480                        memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2481                        buf += reqctx->b0_len;
2482                }
2483                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2484                                   buf, assoclen, 0);
2485                buf += assoclen;
2486                memcpy(buf, reqctx->iv, IV);
2487                buf += IV;
2488                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2489                                   buf, req->cryptlen, req->assoclen);
2490        } else {
2491                ulptx_walk_init(&ulp_walk, ulptx);
2492                if (reqctx->b0_dma)
2493                        ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2494                                            &reqctx->b0_dma);
2495                ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2496                ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2497                ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2498                                  req->assoclen);
2499                ulptx_walk_end(&ulp_walk);
2500        }
2501}
2502
2503void chcr_add_aead_dst_ent(struct aead_request *req,
2504                           struct cpl_rx_phys_dsgl *phys_cpl,
2505                           unsigned int assoclen,
2506                           unsigned short op_type,
2507                           unsigned short qid)
2508{
2509        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2510        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2511        struct dsgl_walk dsgl_walk;
2512        unsigned int authsize = crypto_aead_authsize(tfm);
2513        u32 temp;
2514
2515        dsgl_walk_init(&dsgl_walk, phys_cpl);
2516        if (reqctx->b0_dma)
2517                dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2518        dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2519        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2520        temp = req->cryptlen + (op_type ? -authsize : authsize);
2521        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2522        dsgl_walk_end(&dsgl_walk, qid);
2523}
2524
2525void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2526                             struct ulptx_sgl *ulptx,
2527                             struct  cipher_wr_param *wrparam)
2528{
2529        struct ulptx_walk ulp_walk;
2530        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2531
2532        if (reqctx->imm) {
2533                u8 *buf = (u8 *)ulptx;
2534
2535                memcpy(buf, reqctx->iv, IV);
2536                buf += IV;
2537                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2538                                   buf, wrparam->bytes, reqctx->processed);
2539        } else {
2540                ulptx_walk_init(&ulp_walk, ulptx);
2541                ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2542                ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2543                                  reqctx->src_ofst);
2544                reqctx->srcsg = ulp_walk.last_sg;
2545                reqctx->src_ofst = ulp_walk.last_sg_len;
2546                ulptx_walk_end(&ulp_walk);
2547        }
2548}
2549
2550void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2551                             struct cpl_rx_phys_dsgl *phys_cpl,
2552                             struct  cipher_wr_param *wrparam,
2553                             unsigned short qid)
2554{
2555        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2556        struct dsgl_walk dsgl_walk;
2557
2558        dsgl_walk_init(&dsgl_walk, phys_cpl);
2559        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2560        dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2561                         reqctx->dst_ofst);
2562        reqctx->dstsg = dsgl_walk.last_sg;
2563        reqctx->dst_ofst = dsgl_walk.last_sg_len;
2564
2565        dsgl_walk_end(&dsgl_walk, qid);
2566}
2567
2568void chcr_add_hash_src_ent(struct ahash_request *req,
2569                           struct ulptx_sgl *ulptx,
2570                           struct hash_wr_param *param)
2571{
2572        struct ulptx_walk ulp_walk;
2573        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2574
2575        if (reqctx->hctx_wr.imm) {
2576                u8 *buf = (u8 *)ulptx;
2577
2578                if (param->bfr_len) {
2579                        memcpy(buf, reqctx->reqbfr, param->bfr_len);
2580                        buf += param->bfr_len;
2581                }
2582
2583                sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2584                                   sg_nents(reqctx->hctx_wr.srcsg), buf,
2585                                   param->sg_len, 0);
2586        } else {
2587                ulptx_walk_init(&ulp_walk, ulptx);
2588                if (param->bfr_len)
2589                        ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2590                                            &reqctx->hctx_wr.dma_addr);
2591                ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2592                                  param->sg_len, reqctx->hctx_wr.src_ofst);
2593                reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2594                reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2595                ulptx_walk_end(&ulp_walk);
2596        }
2597}
2598
2599int chcr_hash_dma_map(struct device *dev,
2600                      struct ahash_request *req)
2601{
2602        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2603        int error = 0;
2604
2605        if (!req->nbytes)
2606                return 0;
2607        error = dma_map_sg(dev, req->src, sg_nents(req->src),
2608                           DMA_TO_DEVICE);
2609        if (!error)
2610                return -ENOMEM;
2611        req_ctx->hctx_wr.is_sg_map = 1;
2612        return 0;
2613}
2614
2615void chcr_hash_dma_unmap(struct device *dev,
2616                         struct ahash_request *req)
2617{
2618        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2619
2620        if (!req->nbytes)
2621                return;
2622
2623        dma_unmap_sg(dev, req->src, sg_nents(req->src),
2624                           DMA_TO_DEVICE);
2625        req_ctx->hctx_wr.is_sg_map = 0;
2626
2627}
2628
2629int chcr_cipher_dma_map(struct device *dev,
2630                        struct ablkcipher_request *req)
2631{
2632        int error;
2633        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2634
2635        reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2636                                        DMA_BIDIRECTIONAL);
2637        if (dma_mapping_error(dev, reqctx->iv_dma))
2638                return -ENOMEM;
2639
2640        if (req->src == req->dst) {
2641                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2642                                   DMA_BIDIRECTIONAL);
2643                if (!error)
2644                        goto err;
2645        } else {
2646                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2647                                   DMA_TO_DEVICE);
2648                if (!error)
2649                        goto err;
2650                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2651                                   DMA_FROM_DEVICE);
2652                if (!error) {
2653                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
2654                                   DMA_TO_DEVICE);
2655                        goto err;
2656                }
2657        }
2658
2659        return 0;
2660err:
2661        dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2662        return -ENOMEM;
2663}
2664
2665void chcr_cipher_dma_unmap(struct device *dev,
2666                           struct ablkcipher_request *req)
2667{
2668        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2669
2670        dma_unmap_single(dev, reqctx->iv_dma, IV,
2671                                        DMA_BIDIRECTIONAL);
2672        if (req->src == req->dst) {
2673                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2674                                   DMA_BIDIRECTIONAL);
2675        } else {
2676                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677                                   DMA_TO_DEVICE);
2678                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2679                                   DMA_FROM_DEVICE);
2680        }
2681}
2682
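    /*
     * Encode the CCM message length l(m) big-endian into the csize-byte
     * length field at the end of the B0 block (RFC 3610), returning
     * -EOVERFLOW if the length does not fit.
     */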
2683static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2684{
2685        __be32 data;
2686
2687        memset(block, 0, csize);
2688        block += csize;
2689
2690        if (csize >= 4)
2691                csize = 4;
2692        else if (msglen > (unsigned int)(1 << (8 * csize)))
2693                return -EOVERFLOW;
2694
2695        data = cpu_to_be32(msglen);
2696        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2697
2698        return 0;
2699}
2700
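    /*
     * Build the 16-byte CCM B0 block (RFC 3610) in reqctx->scratch_pad:
     *   byte 0         flags: bit 6 = Adata, bits 3-5 = (M - 2) / 2,
     *                  bits 0-2 = L - 1 (taken from the first IV byte)
     *   bytes 1..15-L  nonce
     *   bytes 16-L..15 l(m), the message length, encoded by set_msg_len()
     */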
2701static int generate_b0(struct aead_request *req,
2702                       struct chcr_aead_ctx *aeadctx,
2703                       unsigned short op_type)
2704{
2705        unsigned int l, lp, m;
2706        int rc;
2707        struct crypto_aead *aead = crypto_aead_reqtfm(req);
2708        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2709        u8 *b0 = reqctx->scratch_pad;
2710
2711        m = crypto_aead_authsize(aead);
2712
2713        memcpy(b0, reqctx->iv, 16);
2714
2715        lp = b0[0];
2716        l = lp + 1;
2717
2718        /* set m, bits 3-5 */
2719        *b0 |= (8 * ((m - 2) / 2));
2720
2721        /* set adata, bit 6, if associated data is used */
2722        if (req->assoclen)
2723                *b0 |= 64;
2724        rc = set_msg_len(b0 + 16 - l,
2725                         (op_type == CHCR_DECRYPT_OP) ?
2726                         req->cryptlen - m : req->cryptlen, l);

            return rc;
2727}
2728
2729static inline int crypto_ccm_check_iv(const u8 *iv)
2730{
2731        /* 2 <= L <= 8, so 1 <= L' <= 7. */
2732        if (iv[0] < 1 || iv[0] > 7)
2733                return -EINVAL;
2734
2735        return 0;
2736}
2737
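    /*
     * Build the per-request counter block and encoded AAD length.  For
     * RFC 4309 the nonce is salt(3) | IV(8) with a 4-byte block counter
     * and the 8-byte IV is excluded from the AAD length; for plain
     * ccm(aes) the caller's 16-byte IV is used as-is.  The B0 block is
     * then generated and the trailing counter bytes are zeroed.
     */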
2738static int ccm_format_packet(struct aead_request *req,
2739                             struct chcr_aead_ctx *aeadctx,
2740                             unsigned int sub_type,
2741                             unsigned short op_type)
2742{
2743        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2744        int rc = 0;
2745
2746        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2747                reqctx->iv[0] = 3;
2748                memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2749                memcpy(reqctx->iv + 4, req->iv, 8);
2750                memset(reqctx->iv + 12, 0, 4);
2751                *((unsigned short *)(reqctx->scratch_pad + 16)) =
2752                        htons(req->assoclen - 8);
2753        } else {
2754                memcpy(reqctx->iv, req->iv, 16);
2755                *((unsigned short *)(reqctx->scratch_pad + 16)) =
2756                        htons(req->assoclen);
2757        }
2758        rc = generate_b0(req, aeadctx, op_type);
2759        /* zero the ctr value */
2760        memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2761        return rc;
2762}
2763
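    /*
     * Fill the CPL_TX_SEC_PDU for a CCM request.  All AAD/cipher/auth
     * offsets are shifted by ccm_xtra, i.e. the B0 block plus the
     * encoded AAD-length field when associated data is present.
     */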
2764static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2765                                  unsigned int dst_size,
2766                                  struct aead_request *req,
2767                                  unsigned short op_type)
2768{
2769        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2770        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2771        unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2772        unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2773        unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2774        unsigned int ccm_xtra;
2775        unsigned char tag_offset = 0, auth_offset = 0;
2776        unsigned int assoclen;
2777
2778        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2779                assoclen = req->assoclen - 8;
2780        else
2781                assoclen = req->assoclen;
2782        ccm_xtra = CCM_B0_SIZE +
2783                ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2784
2785        auth_offset = req->cryptlen ?
2786                (assoclen + IV + 1 + ccm_xtra) : 0;
2787        if (op_type == CHCR_DECRYPT_OP) {
2788                if (crypto_aead_authsize(tfm) != req->cryptlen)
2789                        tag_offset = crypto_aead_authsize(tfm);
2790                else
2791                        auth_offset = 0;
2792        }
2794
2795        sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2796                                         2, assoclen + 1 + ccm_xtra);
2797        sec_cpl->pldlen =
2798                htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2799        /* For CCM there will be b0 always. So AAD start will be 1 always */
2800        sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2801                                        1, assoclen + ccm_xtra, assoclen
2802                                        + IV + 1 + ccm_xtra, 0);
2803
2804        sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2805                                        auth_offset, tag_offset,
2806                                        (op_type == CHCR_ENCRYPT_OP) ? 0 :
2807                                        crypto_aead_authsize(tfm));
2808        sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2809                                        (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2810                                        cipher_mode, mac_mode,
2811                                        aeadctx->hmac_ctrl, IV >> 1);
2812
2813        sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2814                                        0, dst_size);
2815}
2816
2817static int aead_ccm_validate_input(unsigned short op_type,
2818                                   struct aead_request *req,
2819                                   struct chcr_aead_ctx *aeadctx,
2820                                   unsigned int sub_type)
2821{
2822        if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2823                if (crypto_ccm_check_iv(req->iv)) {
2824                        pr_err("CCM: IV check fails\n");
2825                        return -EINVAL;
2826                }
2827        } else {
2828                if (req->assoclen != 16 && req->assoclen != 20) {
2829                        pr_err("RFC4309: Invalid AAD length %d\n",
2830                               req->assoclen);
2831                        return -EINVAL;
2832                }
2833        }
2834        return 0;
2835}
2836
2837static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2838                                          unsigned short qid,
2839                                          int size,
2840                                          unsigned short op_type)
2841{
2842        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2843        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2844        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2845        struct sk_buff *skb = NULL;
2846        struct chcr_wr *chcr_req;
2847        struct cpl_rx_phys_dsgl *phys_cpl;
2848        struct ulptx_sgl *ulptx;
2849        unsigned int transhdr_len;
2850        unsigned int dst_size = 0, kctx_len, dnents, temp;
2851        unsigned int sub_type, assoclen = req->assoclen;
2852        unsigned int authsize = crypto_aead_authsize(tfm);
2853        int error = -EINVAL;
2854        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2855                GFP_ATOMIC;
2856        struct adapter *adap = padap(a_ctx(tfm)->dev);
2857
2858        reqctx->b0_dma = 0;
2859        sub_type = get_aead_subtype(tfm);
2860        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2861                assoclen -= 8;
2862        error = chcr_aead_common_init(req, op_type);
2863        if (error)
2864                return ERR_PTR(error);
2866
2867        reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2868        error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2869        if (error)
2870                goto err;
2871        dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2872        dnents += sg_nents_xlen(req->dst, req->cryptlen
2873                        + (op_type ? -authsize : authsize),
2874                        CHCR_DST_SG_SIZE, req->assoclen);
2875        dnents += MIN_CCM_SG; // For IV and B0
2876        dst_size = get_space_for_phys_dsgl(dnents);
2877        kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2878        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2879        reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2880                       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2881        temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2882                                     reqctx->b0_len, 16) :
2883                (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2884                                    MIN_CCM_SG) *  8);
2885        transhdr_len += temp;
2886        transhdr_len = roundup(transhdr_len, 16);
2887
2888        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2889                                    reqctx->b0_len, transhdr_len, op_type)) {
2890                atomic_inc(&adap->chcr_stats.fallback);
2891                chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2892                                    op_type);
2893                return ERR_PTR(chcr_aead_fallback(req, op_type));
2894        }
2895        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2896
2897        if (!skb) {
2898                error = -ENOMEM;
2899                goto err;
2900        }
2901
2902        chcr_req = __skb_put_zero(skb, transhdr_len);
2903
2904        fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2905
2906        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2907        memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2908        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2909                        aeadctx->key, aeadctx->enckey_len);
2910
2911        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2912        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2913        error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2914        if (error)
2915                goto dstmap_fail;
2916
2917        reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2918                                        &reqctx->scratch_pad, reqctx->b0_len,
2919                                        DMA_BIDIRECTIONAL);
2920        if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2921                              reqctx->b0_dma)) {
2922                error = -ENOMEM;
2923                goto dstmap_fail;
2924        }
2925
2926        chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2927        chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2928
2929        atomic_inc(&adap->chcr_stats.aead_rqst);
2930        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2931                kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2932                reqctx->b0_len) : 0);
2933        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2934                    transhdr_len, temp, 0);
2935        reqctx->skb = skb;
2936        reqctx->op = op_type;
2937
2938        return skb;
2939dstmap_fail:
2940        kfree_skb(skb);
2941err:
2942        chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2943        return ERR_PTR(error);
2944}
2945
2946static struct sk_buff *create_gcm_wr(struct aead_request *req,
2947                                     unsigned short qid,
2948                                     int size,
2949                                     unsigned short op_type)
2950{
2951        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2952        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2953        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2954        struct sk_buff *skb = NULL;
2955        struct chcr_wr *chcr_req;
2956        struct cpl_rx_phys_dsgl *phys_cpl;
2957        struct ulptx_sgl *ulptx;
2958        unsigned int transhdr_len, dnents = 0;
2959        unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2960        unsigned int authsize = crypto_aead_authsize(tfm);
2961        int error = -EINVAL;
2962        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2963                GFP_ATOMIC;
2964        struct adapter *adap = padap(a_ctx(tfm)->dev);
2965
2966        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2967                assoclen = req->assoclen - 8;
2968
2969        reqctx->b0_dma = 0;
2970        error = chcr_aead_common_init(req, op_type);
2971        if (error)
2972                return ERR_PTR(error);
2973        dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2974        dnents += sg_nents_xlen(req->dst, req->cryptlen +
2975                                (op_type ? -authsize : authsize),
2976                                CHCR_DST_SG_SIZE, req->assoclen);
2977        dnents += MIN_GCM_SG; // For IV
2978        dst_size = get_space_for_phys_dsgl(dnents);
2979        kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2980        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2981        reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2982                        SGE_MAX_WR_LEN;
2983        temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2984                (sgl_len(reqctx->src_nents +
2985                reqctx->aad_nents + MIN_GCM_SG) * 8);
2986        transhdr_len += temp;
2987        transhdr_len = roundup(transhdr_len, 16);
2988        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2989                            transhdr_len, op_type)) {
2990                atomic_inc(&adap->chcr_stats.fallback);
2991                chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2992                                    op_type);
2993                return ERR_PTR(chcr_aead_fallback(req, op_type));
2994        }
2995        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2996        if (!skb) {
2997                error = -ENOMEM;
2998                goto err;
2999        }
3000
3001        chcr_req = __skb_put_zero(skb, transhdr_len);
3002
3003        /* Offset of tag from end */
3004        temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
3005        chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3006                                        a_ctx(tfm)->dev->rx_channel_id, 2,
3007                                        (assoclen + 1));
3008        chcr_req->sec_cpl.pldlen =
3009                htonl(assoclen + IV + req->cryptlen);
3010        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3011                                        assoclen ? 1 : 0, assoclen,
3012                                        assoclen + IV + 1, 0);
3013        chcr_req->sec_cpl.cipherstop_lo_authinsert =
3014                        FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
3015                                                temp, temp);
3016        chcr_req->sec_cpl.seqno_numivs =
3017                        FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
3018                                        CHCR_ENCRYPT_OP) ? 1 : 0,
3019                                        CHCR_SCMD_CIPHER_MODE_AES_GCM,
3020                                        CHCR_SCMD_AUTH_MODE_GHASH,
3021                                        aeadctx->hmac_ctrl, IV >> 1);
3022        chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3023                                        0, 0, dst_size);
3024        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3025        memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3026        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3027               GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3028
3029        /* Prepare the 16-byte J0 counter block for a 96-bit GCM IV:        */
3030        /* SALT(4) | IV(8) | 0x00000001 (RFC 4106), else IV(12) | 0x00000001 */
3031        if (get_aead_subtype(tfm) ==
3032            CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3033                memcpy(reqctx->iv, aeadctx->salt, 4);
3034                memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
3035        } else {
3036                memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3037        }
3038        *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3039
3040        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3041        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3042
3043        chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
3044        chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
3045        atomic_inc(&adap->chcr_stats.aead_rqst);
3046        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3047                kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3048        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3049                    transhdr_len, temp, reqctx->verify);
3050        reqctx->skb = skb;
3051        reqctx->op = op_type;
3052        return skb;
3053
3054err:
3055        chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
3056        return ERR_PTR(error);
3057}
3059
3061static int chcr_aead_cra_init(struct crypto_aead *tfm)
3062{
3063        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3064        struct aead_alg *alg = crypto_aead_alg(tfm);
3065
3066        aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3067                                               CRYPTO_ALG_NEED_FALLBACK |
3068                                               CRYPTO_ALG_ASYNC);
3069        if (IS_ERR(aeadctx->sw_cipher))
3070                return PTR_ERR(aeadctx->sw_cipher);
3071        crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3072                                 sizeof(struct aead_request) +
3073                                 crypto_aead_reqsize(aeadctx->sw_cipher)));
3074        return chcr_device_init(a_ctx(tfm));
3075}
3076
3077static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3078{
3079        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3080
3081        crypto_free_aead(aeadctx->sw_cipher);
3082}
3083
3084static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3085                                        unsigned int authsize)
3086{
3087        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3088
3089        aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3090        aeadctx->mayverify = VERIFY_HW;
3091        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3092}

3093static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3094                                    unsigned int authsize)
3095{
3096        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3097        u32 maxauth = crypto_aead_maxauthsize(tfm);
3098
3099        /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3100         * does not hold for SHA1, so the authsize == 12 check must come
3101         * before the authsize == (maxauth >> 1) check.
3102         */
3103        if (authsize == ICV_4) {
3104                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3105                aeadctx->mayverify = VERIFY_HW;
3106        } else if (authsize == ICV_6) {
3107                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3108                aeadctx->mayverify = VERIFY_HW;
3109        } else if (authsize == ICV_10) {
3110                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3111                aeadctx->mayverify = VERIFY_HW;
3112        } else if (authsize == ICV_12) {
3113                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3114                aeadctx->mayverify = VERIFY_HW;
3115        } else if (authsize == ICV_14) {
3116                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3117                aeadctx->mayverify = VERIFY_HW;
3118        } else if (authsize == (maxauth >> 1)) {
3119                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3120                aeadctx->mayverify = VERIFY_HW;
3121        } else if (authsize == maxauth) {
3122                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3123                aeadctx->mayverify = VERIFY_HW;
3124        } else {
3125                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3126                aeadctx->mayverify = VERIFY_SW;
3127        }
3128        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3129}
3131
3132static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3133{
3134        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3135
3136        switch (authsize) {
3137        case ICV_4:
3138                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139                aeadctx->mayverify = VERIFY_HW;
3140                break;
3141        case ICV_8:
3142                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3143                aeadctx->mayverify = VERIFY_HW;
3144                break;
3145        case ICV_12:
3146                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3147                aeadctx->mayverify = VERIFY_HW;
3148                break;
3149        case ICV_14:
3150                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151                aeadctx->mayverify = VERIFY_HW;
3152                break;
3153        case ICV_16:
3154                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3155                aeadctx->mayverify = VERIFY_HW;
3156                break;
3157        case ICV_13:
3158        case ICV_15:
3159                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160                aeadctx->mayverify = VERIFY_SW;
3161                break;
3162        default:
3164                crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3165                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3166                return -EINVAL;
3167        }
3168        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3169}
3170
3171static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3172                                          unsigned int authsize)
3173{
3174        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3175
3176        switch (authsize) {
3177        case ICV_8:
3178                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3179                aeadctx->mayverify = VERIFY_HW;
3180                break;
3181        case ICV_12:
3182                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3183                aeadctx->mayverify = VERIFY_HW;
3184                break;
3185        case ICV_16:
3186                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3187                aeadctx->mayverify = VERIFY_HW;
3188                break;
3189        default:
3190                crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3191                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3192                return -EINVAL;
3193        }
3194        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3195}
3196
3197static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3198                                unsigned int authsize)
3199{
3200        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3201
3202        switch (authsize) {
3203        case ICV_4:
3204                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3205                aeadctx->mayverify = VERIFY_HW;
3206                break;
3207        case ICV_6:
3208                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3209                aeadctx->mayverify = VERIFY_HW;
3210                break;
3211        case ICV_8:
3212                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213                aeadctx->mayverify = VERIFY_HW;
3214                break;
3215        case ICV_10:
3216                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3217                aeadctx->mayverify = VERIFY_HW;
3218                break;
3219        case ICV_12:
3220                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3221                aeadctx->mayverify = VERIFY_HW;
3222                break;
3223        case ICV_14:
3224                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3225                aeadctx->mayverify = VERIFY_HW;
3226                break;
3227        case ICV_16:
3228                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3229                aeadctx->mayverify = VERIFY_HW;
3230                break;
3231        default:
3232                crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3233                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3234                return -EINVAL;
3235        }
3236        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3237}
3238
3239static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3240                                const u8 *key,
3241                                unsigned int keylen)
3242{
3243        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3244        unsigned char ck_size, mk_size;
3245        int key_ctx_size = 0;
3246
3247        key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3248        if (keylen == AES_KEYSIZE_128) {
3249                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3250                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3251        } else if (keylen == AES_KEYSIZE_192) {
3252                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3253                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3254        } else if (keylen == AES_KEYSIZE_256) {
3255                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3256                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3257        } else {
3258                crypto_tfm_set_flags((struct crypto_tfm *)aead,
3259                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3260                aeadctx->enckey_len = 0;
3261                return  -EINVAL;
3262        }
3263        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3264                                                key_ctx_size >> 4);
3265        memcpy(aeadctx->key, key, keylen);
3266        aeadctx->enckey_len = keylen;
3267
3268        return 0;
3269}
3270
3271static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3272                                const u8 *key,
3273                                unsigned int keylen)
3274{
3275        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3276        int error;
3277
3278        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3279        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3280                              CRYPTO_TFM_REQ_MASK);
3281        error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3282        crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3283        crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3284                              CRYPTO_TFM_RES_MASK);
3285        if (error)
3286                return error;
3287        return chcr_ccm_common_setkey(aead, key, keylen);
3288}
3289
3290static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3291                                    unsigned int keylen)
3292{
3293        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3294        int error;
3295
3296        if (keylen < 3) {
3297                crypto_tfm_set_flags((struct crypto_tfm *)aead,
3298                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3299                aeadctx->enckey_len = 0;
3300                return  -EINVAL;
3301        }
3302        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3303        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3304                              CRYPTO_TFM_REQ_MASK);
3305        error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3306        crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3307        crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3308                              CRYPTO_TFM_RES_MASK);
3309        if (error)
3310                return error;
3311        keylen -= 3;
3312        memcpy(aeadctx->salt, key + keylen, 3);
3313        return chcr_ccm_common_setkey(aead, key, keylen);
3314}
3315
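    /*
     * Program the GCM key: store the AES key (and, for RFC 4106, the
     * trailing 4-byte salt) and derive the GHASH subkey H = E_K(0^128)
     * with a software AES cipher so it can be placed in the key context.
     */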
3316static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3317                           unsigned int keylen)
3318{
3319        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3320        struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3321        struct crypto_cipher *cipher;
3322        unsigned int ck_size;
3323        int ret = 0, key_ctx_size = 0;
3324
3325        aeadctx->enckey_len = 0;
3326        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3327        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3328                              & CRYPTO_TFM_REQ_MASK);
3329        ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3330        crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3331        crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3332                              CRYPTO_TFM_RES_MASK);
3333        if (ret)
3334                goto out;
3335
3336        if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3337            keylen > 3) {
3338                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3339                memcpy(aeadctx->salt, key + keylen, 4);
3340        }
3341        if (keylen == AES_KEYSIZE_128) {
3342                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3343        } else if (keylen == AES_KEYSIZE_192) {
3344                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3345        } else if (keylen == AES_KEYSIZE_256) {
3346                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3347        } else {
3348                crypto_tfm_set_flags((struct crypto_tfm *)aead,
3349                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
3350                pr_err("GCM: Invalid key length %d\n", keylen);
3351                ret = -EINVAL;
3352                goto out;
3353        }
3354
3355        memcpy(aeadctx->key, key, keylen);
3356        aeadctx->enckey_len = keylen;
3357        key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3358                AEAD_H_SIZE;
3359        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3360                                                CHCR_KEYCTX_MAC_KEY_SIZE_128,
3361                                                0, 0,
3362                                                key_ctx_size >> 4);
3363        /* Calculate the H = CIPH(K, 0 repeated 16 times).
3364         * It will go in key context
3365         */
3366        cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3367        if (IS_ERR(cipher)) {
3368                aeadctx->enckey_len = 0;
3369                ret = -ENOMEM;
3370                goto out;
3371        }
3372
3373        ret = crypto_cipher_setkey(cipher, key, keylen);
3374        if (ret) {
3375                aeadctx->enckey_len = 0;
3376                goto out1;
3377        }
3378        memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3379        crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3380
3381out1:
3382        crypto_free_cipher(cipher);
3383out:
3384        return ret;
3385}
3386
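    /*
     * Split the authenc() key into cipher and MAC parts.  The cipher key
     * (plus its decryption round keys for CBC modes) is kept, while the
     * MAC key is consumed here to precompute the partial hashes of ipad
     * and opad, which are all the hardware needs in the key context.
     */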
3387static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3388                                   unsigned int keylen)
3389{
3390        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3391        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3392        /* keys holds both the authentication and encryption keys */
3393        struct crypto_authenc_keys keys;
3394        unsigned int bs, subtype;
3395        unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3396        int err = 0, i, key_ctx_len = 0;
3397        unsigned char ck_size = 0;
3398        unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3399        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3400        struct algo_param param;
3401        int align;
3402        u8 *o_ptr = NULL;
3403
3404        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3405        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3406                              & CRYPTO_TFM_REQ_MASK);
3407        err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3408        crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3409        crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3410                              & CRYPTO_TFM_RES_MASK);
3411        if (err)
3412                goto out;
3413
3414        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3415                crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3416                goto out;
3417        }
3418
3419        if (get_alg_config(&param, max_authsize)) {
3420                pr_err("chcr : Unsupported digest size\n");
3421                goto out;
3422        }
3423        subtype = get_aead_subtype(authenc);
3424        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3425                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3426                if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3427                        goto out;
3428                memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3429                - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3430                keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3431        }
3432        if (keys.enckeylen == AES_KEYSIZE_128) {
3433                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3434        } else if (keys.enckeylen == AES_KEYSIZE_192) {
3435                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3436        } else if (keys.enckeylen == AES_KEYSIZE_256) {
3437                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3438        } else {
3439                pr_err("chcr : Unsupported cipher key\n");
3440                goto out;
3441        }
3442
3443        /* Copy only the encryption key. The auth key is only used here to
3444         * generate h(ipad) and h(opad), so it does not need to be kept in
3445         * the key context.
3446         */
3447        memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3448        aeadctx->enckey_len = keys.enckeylen;
3449        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3450                subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3451
3452                get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3453                            aeadctx->enckey_len << 3);
3454        }
3455        base_hash = chcr_alloc_shash(max_authsize);
3456        if (IS_ERR(base_hash)) {
3457                pr_err("chcr : Base driver cannot be loaded\n");
3458                aeadctx->enckey_len = 0;
3459                memzero_explicit(&keys, sizeof(keys));
3460                return -EINVAL;
3461        }
3462        {
3463                SHASH_DESC_ON_STACK(shash, base_hash);
3464                shash->tfm = base_hash;
3465                shash->flags = crypto_shash_get_flags(base_hash);
3466                bs = crypto_shash_blocksize(base_hash);
3467                align = KEYCTX_ALIGN_PAD(max_authsize);
3468                o_ptr = actx->h_iopad + param.result_size + align;
3469
3470                if (keys.authkeylen > bs) {
3471                        err = crypto_shash_digest(shash, keys.authkey,
3472                                                  keys.authkeylen,
3473                                                  o_ptr);
3474                        if (err) {
3475                                pr_err("chcr : Hashing of auth key failed\n");
3476                                goto out;
3477                        }
3478                        keys.authkeylen = max_authsize;
3479                } else
3480                        memcpy(o_ptr, keys.authkey, keys.authkeylen);
3481
3482                /* Compute the ipad-digest */
3483                memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3484                memcpy(pad, o_ptr, keys.authkeylen);
3485                for (i = 0; i < bs >> 2; i++)
3486                        *((unsigned int *)pad + i) ^= IPAD_DATA;
3487
3488                if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3489                                              max_authsize))
3490                        goto out;
3491                /* Compute the opad-digest */
3492                memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3493                memcpy(pad, o_ptr, keys.authkeylen);
3494                for (i = 0; i < bs >> 2; i++)
3495                        *((unsigned int *)pad + i) ^= OPAD_DATA;
3496
3497                if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3498                        goto out;
3499
3500                /* convert the ipad and opad digest to network order */
3501                chcr_change_order(actx->h_iopad, param.result_size);
3502                chcr_change_order(o_ptr, param.result_size);
3503                key_ctx_len = sizeof(struct _key_ctx) +
3504                        roundup(keys.enckeylen, 16) +
3505                        (param.result_size + align) * 2;
3506                aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3507                                                0, 1, key_ctx_len >> 4);
3508                actx->auth_mode = param.auth_mode;
3509                chcr_free_shash(base_hash);
3510
3511                memzero_explicit(&keys, sizeof(keys));
3512                return 0;
3513        }
3514out:
3515        aeadctx->enckey_len = 0;
3516        memzero_explicit(&keys, sizeof(keys));
3517        if (!IS_ERR(base_hash))
3518                chcr_free_shash(base_hash);
3519        return -EINVAL;
3520}
3521
3522static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3523                                        const u8 *key, unsigned int keylen)
3524{
3525        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3526        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3527        /* keys holds both the authentication and encryption keys */
3528        struct crypto_authenc_keys keys;
3529        int err;
3530        unsigned int subtype;
3531        int key_ctx_len = 0;
3532        unsigned char ck_size = 0;
3533
3534        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3535        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3536                              & CRYPTO_TFM_REQ_MASK);
3537        err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3538        crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3539        crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3540                              & CRYPTO_TFM_RES_MASK);
3541        if (err)
3542                goto out;
3543
3544        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3545                crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3546                goto out;
3547        }
3548        subtype = get_aead_subtype(authenc);
3549        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3550            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3551                if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3552                        goto out;
3553                memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3554                        - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3555                keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3556        }
3557        if (keys.enckeylen == AES_KEYSIZE_128) {
3558                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3559        } else if (keys.enckeylen == AES_KEYSIZE_192) {
3560                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3561        } else if (keys.enckeylen == AES_KEYSIZE_256) {
3562                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3563        } else {
3564                pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3565                goto out;
3566        }
3567        memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3568        aeadctx->enckey_len = keys.enckeylen;
3569        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3570            subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3571                get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3572                                aeadctx->enckey_len << 3);
3573        }
3574        key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3575
3576        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3577                                                0, key_ctx_len >> 4);
3578        actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3579        memzero_explicit(&keys, sizeof(keys));
3580        return 0;
3581out:
3582        aeadctx->enckey_len = 0;
3583        memzero_explicit(&keys, sizeof(keys));
3584        return -EINVAL;
3585}
3586
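    /*
     * Common AEAD submission path: check that a crypto device is bound,
     * honour CRYPTO_TFM_REQ_MAY_BACKLOG when the queue is full, build the
     * work request with the supplied constructor and post it to the Tx
     * queue.  Completion is reported asynchronously via -EINPROGRESS.
     */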
3587static int chcr_aead_op(struct aead_request *req,
3588                        unsigned short op_type,
3589                        int size,
3590                        create_wr_t create_wr_fn)
3591{
3592        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3593        struct uld_ctx *u_ctx;
3594        struct sk_buff *skb;
3595
3596        if (!a_ctx(tfm)->dev) {
3597                pr_err("chcr : %s : No crypto device.\n", __func__);
3598                return -ENXIO;
3599        }
3600        u_ctx = ULD_CTX(a_ctx(tfm));
3601        if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3602                                   a_ctx(tfm)->tx_qidx)) {
3603                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3604                        return -EBUSY;
3605        }
3606
3607        /* Form a WR from req */
3608        skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3609                           op_type);
3610
3611        if (IS_ERR(skb) || !skb)
3612                return PTR_ERR(skb);
3613
3614        skb->dev = u_ctx->lldi.ports[0];
3615        set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3616        chcr_send_wr(skb);
3617        return -EINPROGRESS;
3618}
3619
3620static int chcr_aead_encrypt(struct aead_request *req)
3621{
3622        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3623        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3624
3625        reqctx->verify = VERIFY_HW;
3626
3627        switch (get_aead_subtype(tfm)) {
3628        case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3629        case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3630        case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3631        case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3632                return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3633                                    create_authenc_wr);
3634        case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3635        case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3636                return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3637                                    create_aead_ccm_wr);
3638        default:
3639                return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3640                                    create_gcm_wr);
3641        }
3642}
3643
3644static int chcr_aead_decrypt(struct aead_request *req)
3645{
3646        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3647        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3648        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3649        int size;
3650
3651        if (aeadctx->mayverify == VERIFY_SW) {
3652                size = crypto_aead_maxauthsize(tfm);
3653                reqctx->verify = VERIFY_SW;
3654        } else {
3655                size = 0;
3656                reqctx->verify = VERIFY_HW;
3657        }
3658
3659        switch (get_aead_subtype(tfm)) {
3660        case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3661        case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3662        case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3663        case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3664                return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3665                                    create_authenc_wr);
3666        case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3667        case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3668                return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3669                                    create_aead_ccm_wr);
3670        default:
3671                return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3672                                    create_gcm_wr);
3673        }
3674}
3675
3676static struct chcr_alg_template driver_algs[] = {
3677        /* AES-CBC */
3678        {
3679                .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3680                .is_registered = 0,
3681                .alg.crypto = {
3682                        .cra_name               = "cbc(aes)",
3683                        .cra_driver_name        = "cbc-aes-chcr",
3684                        .cra_blocksize          = AES_BLOCK_SIZE,
3685                        .cra_init               = chcr_cra_init,
3686                        .cra_exit               = chcr_cra_exit,
3687                        .cra_u.ablkcipher       = {
3688                                .min_keysize    = AES_MIN_KEY_SIZE,
3689                                .max_keysize    = AES_MAX_KEY_SIZE,
3690                                .ivsize         = AES_BLOCK_SIZE,
3691                                .setkey                 = chcr_aes_cbc_setkey,
3692                                .encrypt                = chcr_aes_encrypt,
3693                                .decrypt                = chcr_aes_decrypt,
3694                        }
3695                }
3696        },
3697        {
3698                .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3699                .is_registered = 0,
3700                .alg.crypto = {
3701                        .cra_name               = "xts(aes)",
3702                        .cra_driver_name        = "xts-aes-chcr",
3703                        .cra_blocksize          = AES_BLOCK_SIZE,
3704                        .cra_init               = chcr_cra_init,
3705                        .cra_exit               = NULL,
3706                        .cra_u.ablkcipher = {
3707                                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
3708                                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
3709                                        .ivsize         = AES_BLOCK_SIZE,
3710                                        .setkey         = chcr_aes_xts_setkey,
3711                                        .encrypt        = chcr_aes_encrypt,
3712                                        .decrypt        = chcr_aes_decrypt,
3713                                }
3714                        }
3715        },
3716        {
3717                .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3718                .is_registered = 0,
3719                .alg.crypto = {
3720                        .cra_name               = "ctr(aes)",
3721                        .cra_driver_name        = "ctr-aes-chcr",
3722                        .cra_blocksize          = 1,
3723                        .cra_init               = chcr_cra_init,
3724                        .cra_exit               = chcr_cra_exit,
3725                        .cra_u.ablkcipher       = {
3726                                .min_keysize    = AES_MIN_KEY_SIZE,
3727                                .max_keysize    = AES_MAX_KEY_SIZE,
3728                                .ivsize         = AES_BLOCK_SIZE,
3729                                .setkey         = chcr_aes_ctr_setkey,
3730                                .encrypt        = chcr_aes_encrypt,
3731                                .decrypt        = chcr_aes_decrypt,
3732                        }
3733                }
3734        },
3735        {
3736                .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3737                        CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3738                .is_registered = 0,
3739                .alg.crypto = {
3740                        .cra_name               = "rfc3686(ctr(aes))",
3741                        .cra_driver_name        = "rfc3686-ctr-aes-chcr",
3742                        .cra_blocksize          = 1,
3743                        .cra_init               = chcr_rfc3686_init,
3744                        .cra_exit               = chcr_cra_exit,
3745                        .cra_u.ablkcipher       = {
3746                                .min_keysize    = AES_MIN_KEY_SIZE +
3747                                        CTR_RFC3686_NONCE_SIZE,
3748                                .max_keysize    = AES_MAX_KEY_SIZE +
3749                                        CTR_RFC3686_NONCE_SIZE,
3750                                .ivsize         = CTR_RFC3686_IV_SIZE,
3751                                .setkey         = chcr_aes_rfc3686_setkey,
3752                                .encrypt        = chcr_aes_encrypt,
3753                                .decrypt        = chcr_aes_decrypt,
3754                                .geniv          = "seqiv",
3755                        }
3756                }
3757        },
3758        /* SHA */
3759        {
3760                .type = CRYPTO_ALG_TYPE_AHASH,
3761                .is_registered = 0,
3762                .alg.hash = {
3763                        .halg.digestsize = SHA1_DIGEST_SIZE,
3764                        .halg.base = {
3765                                .cra_name = "sha1",
3766                                .cra_driver_name = "sha1-chcr",
3767                                .cra_blocksize = SHA1_BLOCK_SIZE,
3768                        }
3769                }
3770        },
3771        {
3772                .type = CRYPTO_ALG_TYPE_AHASH,
3773                .is_registered = 0,
3774                .alg.hash = {
3775                        .halg.digestsize = SHA256_DIGEST_SIZE,
3776                        .halg.base = {
3777                                .cra_name = "sha256",
3778                                .cra_driver_name = "sha256-chcr",
3779                                .cra_blocksize = SHA256_BLOCK_SIZE,
3780                        }
3781                }
3782        },
3783        {
3784                .type = CRYPTO_ALG_TYPE_AHASH,
3785                .is_registered = 0,
3786                .alg.hash = {
3787                        .halg.digestsize = SHA224_DIGEST_SIZE,
3788                        .halg.base = {
3789                                .cra_name = "sha224",
3790                                .cra_driver_name = "sha224-chcr",
3791                                .cra_blocksize = SHA224_BLOCK_SIZE,
3792                        }
3793                }
3794        },
3795        {
3796                .type = CRYPTO_ALG_TYPE_AHASH,
3797                .is_registered = 0,
3798                .alg.hash = {
3799                        .halg.digestsize = SHA384_DIGEST_SIZE,
3800                        .halg.base = {
3801                                .cra_name = "sha384",
3802                                .cra_driver_name = "sha384-chcr",
3803                                .cra_blocksize = SHA384_BLOCK_SIZE,
3804                        }
3805                }
3806        },
3807        {
3808                .type = CRYPTO_ALG_TYPE_AHASH,
3809                .is_registered = 0,
3810                .alg.hash = {
3811                        .halg.digestsize = SHA512_DIGEST_SIZE,
3812                        .halg.base = {
3813                                .cra_name = "sha512",
3814                                .cra_driver_name = "sha512-chcr",
3815                                .cra_blocksize = SHA512_BLOCK_SIZE,
3816                        }
3817                }
3818        },
3819        /* HMAC */
3820        {
3821                .type = CRYPTO_ALG_TYPE_HMAC,
3822                .is_registered = 0,
3823                .alg.hash = {
3824                        .halg.digestsize = SHA1_DIGEST_SIZE,
3825                        .halg.base = {
3826                                .cra_name = "hmac(sha1)",
3827                                .cra_driver_name = "hmac-sha1-chcr",
3828                                .cra_blocksize = SHA1_BLOCK_SIZE,
3829                        }
3830                }
3831        },
3832        {
3833                .type = CRYPTO_ALG_TYPE_HMAC,
3834                .is_registered = 0,
3835                .alg.hash = {
3836                        .halg.digestsize = SHA224_DIGEST_SIZE,
3837                        .halg.base = {
3838                                .cra_name = "hmac(sha224)",
3839                                .cra_driver_name = "hmac-sha224-chcr",
3840                                .cra_blocksize = SHA224_BLOCK_SIZE,
3841                        }
3842                }
3843        },
3844        {
3845                .type = CRYPTO_ALG_TYPE_HMAC,
3846                .is_registered = 0,
3847                .alg.hash = {
3848                        .halg.digestsize = SHA256_DIGEST_SIZE,
3849                        .halg.base = {
3850                                .cra_name = "hmac(sha256)",
3851                                .cra_driver_name = "hmac-sha256-chcr",
3852                                .cra_blocksize = SHA256_BLOCK_SIZE,
3853                        }
3854                }
3855        },
3856        {
3857                .type = CRYPTO_ALG_TYPE_HMAC,
3858                .is_registered = 0,
3859                .alg.hash = {
3860                        .halg.digestsize = SHA384_DIGEST_SIZE,
3861                        .halg.base = {
3862                                .cra_name = "hmac(sha384)",
3863                                .cra_driver_name = "hmac-sha384-chcr",
3864                                .cra_blocksize = SHA384_BLOCK_SIZE,
3865                        }
3866                }
3867        },
3868        {
3869                .type = CRYPTO_ALG_TYPE_HMAC,
3870                .is_registered = 0,
3871                .alg.hash = {
3872                        .halg.digestsize = SHA512_DIGEST_SIZE,
3873                        .halg.base = {
3874                                .cra_name = "hmac(sha512)",
3875                                .cra_driver_name = "hmac-sha512-chcr",
3876                                .cra_blocksize = SHA512_BLOCK_SIZE,
3877                        }
3878                }
3879        },
3880        /* Add AEAD Algorithms */
3881        {
3882                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3883                .is_registered = 0,
3884                .alg.aead = {
3885                        .base = {
3886                                .cra_name = "gcm(aes)",
3887                                .cra_driver_name = "gcm-aes-chcr",
3888                                .cra_blocksize  = 1,
3889                                .cra_priority = CHCR_AEAD_PRIORITY,
3890                                .cra_ctxsize =  sizeof(struct chcr_context) +
3891                                                sizeof(struct chcr_aead_ctx) +
3892                                                sizeof(struct chcr_gcm_ctx),
3893                        },
3894                        .ivsize = GCM_AES_IV_SIZE,
3895                        .maxauthsize = GHASH_DIGEST_SIZE,
3896                        .setkey = chcr_gcm_setkey,
3897                        .setauthsize = chcr_gcm_setauthsize,
3898                }
3899        },
3900        {
3901                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3902                .is_registered = 0,
3903                .alg.aead = {
3904                        .base = {
3905                                .cra_name = "rfc4106(gcm(aes))",
3906                                .cra_driver_name = "rfc4106-gcm-aes-chcr",
3907                                .cra_blocksize   = 1,
3908                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
3909                                .cra_ctxsize =  sizeof(struct chcr_context) +
3910                                                sizeof(struct chcr_aead_ctx) +
3911                                                sizeof(struct chcr_gcm_ctx),
3912
3913                        },
3914                        .ivsize = GCM_RFC4106_IV_SIZE,
3915                        .maxauthsize    = GHASH_DIGEST_SIZE,
3916                        .setkey = chcr_gcm_setkey,
3917                        .setauthsize    = chcr_4106_4309_setauthsize,
3918                }
3919        },
3920        {
3921                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3922                .is_registered = 0,
3923                .alg.aead = {
3924                        .base = {
3925                                .cra_name = "ccm(aes)",
3926                                .cra_driver_name = "ccm-aes-chcr",
3927                                .cra_blocksize   = 1,
3928                                .cra_priority = CHCR_AEAD_PRIORITY,
3929                                .cra_ctxsize =  sizeof(struct chcr_context) +
3930                                                sizeof(struct chcr_aead_ctx),
3931
3932                        },
3933                        .ivsize = AES_BLOCK_SIZE,
3934                        .maxauthsize    = GHASH_DIGEST_SIZE,
3935                        .setkey = chcr_aead_ccm_setkey,
3936                        .setauthsize    = chcr_ccm_setauthsize,
3937                }
3938        },
3939        {
3940                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3941                .is_registered = 0,
3942                .alg.aead = {
3943                        .base = {
3944                                .cra_name = "rfc4309(ccm(aes))",
3945                                .cra_driver_name = "rfc4309-ccm-aes-chcr",
3946                                .cra_blocksize   = 1,
3947                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
3948                                .cra_ctxsize =  sizeof(struct chcr_context) +
3949                                                sizeof(struct chcr_aead_ctx),
3950
3951                        },
3952                        .ivsize = 8,
3953                        .maxauthsize    = GHASH_DIGEST_SIZE,
3954                        .setkey = chcr_aead_rfc4309_setkey,
3955                        .setauthsize = chcr_4106_4309_setauthsize,
3956                }
3957        },
3958        {
3959                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3960                .is_registered = 0,
3961                .alg.aead = {
3962                        .base = {
3963                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
3964                                .cra_driver_name =
3965                                        "authenc-hmac-sha1-cbc-aes-chcr",
3966                                .cra_blocksize   = AES_BLOCK_SIZE,
3967                                .cra_priority = CHCR_AEAD_PRIORITY,
3968                                .cra_ctxsize =  sizeof(struct chcr_context) +
3969                                                sizeof(struct chcr_aead_ctx) +
3970                                                sizeof(struct chcr_authenc_ctx),
3971
3972                        },
3973                        .ivsize = AES_BLOCK_SIZE,
3974                        .maxauthsize = SHA1_DIGEST_SIZE,
3975                        .setkey = chcr_authenc_setkey,
3976                        .setauthsize = chcr_authenc_setauthsize,
3977                }
3978        },
3979        {
3980                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3981                .is_registered = 0,
3982                .alg.aead = {
3983                        .base = {
3984
3985                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
3986                                .cra_driver_name =
3987                                        "authenc-hmac-sha256-cbc-aes-chcr",
3988                                .cra_blocksize   = AES_BLOCK_SIZE,
3989                                .cra_priority = CHCR_AEAD_PRIORITY,
3990                                .cra_ctxsize =  sizeof(struct chcr_context) +
3991                                                sizeof(struct chcr_aead_ctx) +
3992                                                sizeof(struct chcr_authenc_ctx),
3993
3994                        },
3995                        .ivsize = AES_BLOCK_SIZE,
3996                        .maxauthsize    = SHA256_DIGEST_SIZE,
3997                        .setkey = chcr_authenc_setkey,
3998                        .setauthsize = chcr_authenc_setauthsize,
3999                }
4000        },
4001        {
4002                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4003                .is_registered = 0,
4004                .alg.aead = {
4005                        .base = {
4006                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
4007                                .cra_driver_name =
4008                                        "authenc-hmac-sha224-cbc-aes-chcr",
4009                                .cra_blocksize   = AES_BLOCK_SIZE,
4010                                .cra_priority = CHCR_AEAD_PRIORITY,
4011                                .cra_ctxsize =  sizeof(struct chcr_context) +
4012                                                sizeof(struct chcr_aead_ctx) +
4013                                                sizeof(struct chcr_authenc_ctx),
4014                        },
4015                        .ivsize = AES_BLOCK_SIZE,
4016                        .maxauthsize = SHA224_DIGEST_SIZE,
4017                        .setkey = chcr_authenc_setkey,
4018                        .setauthsize = chcr_authenc_setauthsize,
4019                }
4020        },
4021        {
4022                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4023                .is_registered = 0,
4024                .alg.aead = {
4025                        .base = {
4026                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
4027                                .cra_driver_name =
4028                                        "authenc-hmac-sha384-cbc-aes-chcr",
4029                                .cra_blocksize   = AES_BLOCK_SIZE,
4030                                .cra_priority = CHCR_AEAD_PRIORITY,
4031                                .cra_ctxsize =  sizeof(struct chcr_context) +
4032                                                sizeof(struct chcr_aead_ctx) +
4033                                                sizeof(struct chcr_authenc_ctx),
4034
4035                        },
4036                        .ivsize = AES_BLOCK_SIZE,
4037                        .maxauthsize = SHA384_DIGEST_SIZE,
4038                        .setkey = chcr_authenc_setkey,
4039                        .setauthsize = chcr_authenc_setauthsize,
4040                }
4041        },
4042        {
4043                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4044                .is_registered = 0,
4045                .alg.aead = {
4046                        .base = {
4047                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
4048                                .cra_driver_name =
4049                                        "authenc-hmac-sha512-cbc-aes-chcr",
4050                                .cra_blocksize   = AES_BLOCK_SIZE,
4051                                .cra_priority = CHCR_AEAD_PRIORITY,
4052                                .cra_ctxsize =  sizeof(struct chcr_context) +
4053                                                sizeof(struct chcr_aead_ctx) +
4054                                                sizeof(struct chcr_authenc_ctx),
4055
4056                        },
4057                        .ivsize = AES_BLOCK_SIZE,
4058                        .maxauthsize = SHA512_DIGEST_SIZE,
4059                        .setkey = chcr_authenc_setkey,
4060                        .setauthsize = chcr_authenc_setauthsize,
4061                }
4062        },
4063        {
4064                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4065                .is_registered = 0,
4066                .alg.aead = {
4067                        .base = {
4068                                .cra_name = "authenc(digest_null,cbc(aes))",
4069                                .cra_driver_name =
4070                                        "authenc-digest_null-cbc-aes-chcr",
4071                                .cra_blocksize   = AES_BLOCK_SIZE,
4072                                .cra_priority = CHCR_AEAD_PRIORITY,
4073                                .cra_ctxsize =  sizeof(struct chcr_context) +
4074                                                sizeof(struct chcr_aead_ctx) +
4075                                                sizeof(struct chcr_authenc_ctx),
4076
4077                        },
4078                        .ivsize  = AES_BLOCK_SIZE,
4079                        .maxauthsize = 0,
4080                        .setkey  = chcr_aead_digest_null_setkey,
4081                        .setauthsize = chcr_authenc_null_setauthsize,
4082                }
4083        },
4084        {
4085                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4086                .is_registered = 0,
4087                .alg.aead = {
4088                        .base = {
4089                                .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4090                                .cra_driver_name =
4091                                "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4092                                .cra_blocksize   = 1,
4093                                .cra_priority = CHCR_AEAD_PRIORITY,
4094                                .cra_ctxsize =  sizeof(struct chcr_context) +
4095                                                sizeof(struct chcr_aead_ctx) +
4096                                                sizeof(struct chcr_authenc_ctx),
4097
4098                        },
4099                        .ivsize = CTR_RFC3686_IV_SIZE,
4100                        .maxauthsize = SHA1_DIGEST_SIZE,
4101                        .setkey = chcr_authenc_setkey,
4102                        .setauthsize = chcr_authenc_setauthsize,
4103                }
4104        },
4105        {
4106                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4107                .is_registered = 0,
4108                .alg.aead = {
4109                        .base = {
4110
4111                                .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4112                                .cra_driver_name =
4113                                "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4114                                .cra_blocksize   = 1,
4115                                .cra_priority = CHCR_AEAD_PRIORITY,
4116                                .cra_ctxsize =  sizeof(struct chcr_context) +
4117                                                sizeof(struct chcr_aead_ctx) +
4118                                                sizeof(struct chcr_authenc_ctx),
4119
4120                        },
4121                        .ivsize = CTR_RFC3686_IV_SIZE,
4122                        .maxauthsize    = SHA256_DIGEST_SIZE,
4123                        .setkey = chcr_authenc_setkey,
4124                        .setauthsize = chcr_authenc_setauthsize,
4125                }
4126        },
4127        {
4128                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4129                .is_registered = 0,
4130                .alg.aead = {
4131                        .base = {
4132                                .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4133                                .cra_driver_name =
4134                                "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4135                                .cra_blocksize   = 1,
4136                                .cra_priority = CHCR_AEAD_PRIORITY,
4137                                .cra_ctxsize =  sizeof(struct chcr_context) +
4138                                                sizeof(struct chcr_aead_ctx) +
4139                                                sizeof(struct chcr_authenc_ctx),
4140                        },
4141                        .ivsize = CTR_RFC3686_IV_SIZE,
4142                        .maxauthsize = SHA224_DIGEST_SIZE,
4143                        .setkey = chcr_authenc_setkey,
4144                        .setauthsize = chcr_authenc_setauthsize,
4145                }
4146        },
4147        {
4148                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4149                .is_registered = 0,
4150                .alg.aead = {
4151                        .base = {
4152                                .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4153                                .cra_driver_name =
4154                                "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4155                                .cra_blocksize   = 1,
4156                                .cra_priority = CHCR_AEAD_PRIORITY,
4157                                .cra_ctxsize =  sizeof(struct chcr_context) +
4158                                                sizeof(struct chcr_aead_ctx) +
4159                                                sizeof(struct chcr_authenc_ctx),
4160
4161                        },
4162                        .ivsize = CTR_RFC3686_IV_SIZE,
4163                        .maxauthsize = SHA384_DIGEST_SIZE,
4164                        .setkey = chcr_authenc_setkey,
4165                        .setauthsize = chcr_authenc_setauthsize,
4166                }
4167        },
4168        {
4169                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4170                .is_registered = 0,
4171                .alg.aead = {
4172                        .base = {
4173                                .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4174                                .cra_driver_name =
4175                                "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4176                                .cra_blocksize   = 1,
4177                                .cra_priority = CHCR_AEAD_PRIORITY,
4178                                .cra_ctxsize =  sizeof(struct chcr_context) +
4179                                                sizeof(struct chcr_aead_ctx) +
4180                                                sizeof(struct chcr_authenc_ctx),
4181
4182                        },
4183                        .ivsize = CTR_RFC3686_IV_SIZE,
4184                        .maxauthsize = SHA512_DIGEST_SIZE,
4185                        .setkey = chcr_authenc_setkey,
4186                        .setauthsize = chcr_authenc_setauthsize,
4187                }
4188        },
4189        {
4190                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4191                .is_registered = 0,
4192                .alg.aead = {
4193                        .base = {
4194                                .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4195                                .cra_driver_name =
4196                                "authenc-digest_null-rfc3686-ctr-aes-chcr",
4197                                .cra_blocksize   = 1,
4198                                .cra_priority = CHCR_AEAD_PRIORITY,
4199                                .cra_ctxsize =  sizeof(struct chcr_context) +
4200                                                sizeof(struct chcr_aead_ctx) +
4201                                                sizeof(struct chcr_authenc_ctx),
4202
4203                        },
4204                        .ivsize  = CTR_RFC3686_IV_SIZE,
4205                        .maxauthsize = 0,
4206                        .setkey  = chcr_aead_digest_null_setkey,
4207                        .setauthsize = chcr_authenc_null_setauthsize,
4208                }
4209        },
4210
4211};
4212
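/*
 * Illustrative only, not part of this driver: once an entry of
 * driver_algs[] above is registered, kernel consumers reach it through
 * the generic crypto API by cra_name, and the crypto core picks the
 * chcr implementation whenever its cra_priority is the highest one
 * registered for that name.  A minimal sketch of such a caller (in
 * some other module), here for the "gcm(aes)" entry; key and keylen
 * are assumed to be supplied by that caller:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);      dispatches to chcr_gcm_setkey()
 *	crypto_aead_setauthsize(tfm, 16);          dispatches to chcr_gcm_setauthsize()
 *	... build requests with aead_request_alloc()/crypto_aead_encrypt() ...
 *	crypto_free_aead(tfm);
 */
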
4213/*
4214 *      chcr_unregister_alg - Deregister crypto algorithms with the
4215 *      kernel framework.
4216 */
4217static int chcr_unregister_alg(void)
4218{
4219        int i;
4220
4221        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4222                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4223                case CRYPTO_ALG_TYPE_ABLKCIPHER:
4224                        if (driver_algs[i].is_registered)
4225                                crypto_unregister_alg(
4226                                                &driver_algs[i].alg.crypto);
4227                        break;
4228                case CRYPTO_ALG_TYPE_AEAD:
4229                        if (driver_algs[i].is_registered)
4230                                crypto_unregister_aead(
4231                                                &driver_algs[i].alg.aead);
4232                        break;
4233                case CRYPTO_ALG_TYPE_AHASH:
4234                        if (driver_algs[i].is_registered)
4235                                crypto_unregister_ahash(
4236                                                &driver_algs[i].alg.hash);
4237                        break;
4238                }
4239                driver_algs[i].is_registered = 0;
4240        }
4241        return 0;
4242}
4243
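/*
 * Context sizes reported to the crypto core for the ahash entries:
 * plain digest transforms only need the common chcr_context, HMAC
 * transforms additionally carry a struct hmac_ctx for the keyed state,
 * and the per-request context size doubles as the statesize used by
 * the export/import callbacks.  AHASH_CRA_FLAGS marks them all as
 * asynchronous ahash implementations.
 */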
4244#define SZ_AHASH_CTX sizeof(struct chcr_context)
4245#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4246#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4247#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
4248
4249/*
4250 *      chcr_register_alg - Register crypto algorithms with the kernel framework.
4251 */
4252static int chcr_register_alg(void)
4253{
4254        struct crypto_alg ai;
4255        struct ahash_alg *a_hash;
4256        int err = 0, i;
4257        char *name = NULL;
4258
4259        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4260                if (driver_algs[i].is_registered)
4261                        continue;
4262                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4263                case CRYPTO_ALG_TYPE_ABLKCIPHER:
4264                        driver_algs[i].alg.crypto.cra_priority =
4265                                CHCR_CRA_PRIORITY;
4266                        driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4267                        driver_algs[i].alg.crypto.cra_flags =
4268                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4269                                CRYPTO_ALG_NEED_FALLBACK;
4270                        driver_algs[i].alg.crypto.cra_ctxsize =
4271                                sizeof(struct chcr_context) +
4272                                sizeof(struct ablk_ctx);
4273                        driver_algs[i].alg.crypto.cra_alignmask = 0;
4274                        driver_algs[i].alg.crypto.cra_type =
4275                                &crypto_ablkcipher_type;
4276                        err = crypto_register_alg(&driver_algs[i].alg.crypto);
4277                        name = driver_algs[i].alg.crypto.cra_driver_name;
4278                        break;
4279                case CRYPTO_ALG_TYPE_AEAD:
4280                        driver_algs[i].alg.aead.base.cra_flags =
4281                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
4282                                CRYPTO_ALG_NEED_FALLBACK;
4283                        driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4284                        driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4285                        driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4286                        driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4287                        driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4288                        err = crypto_register_aead(&driver_algs[i].alg.aead);
4289                        name = driver_algs[i].alg.aead.base.cra_driver_name;
4290                        break;
4291                case CRYPTO_ALG_TYPE_AHASH:
4292                        a_hash = &driver_algs[i].alg.hash;
4293                        a_hash->update = chcr_ahash_update;
4294                        a_hash->final = chcr_ahash_final;
4295                        a_hash->finup = chcr_ahash_finup;
4296                        a_hash->digest = chcr_ahash_digest;
4297                        a_hash->export = chcr_ahash_export;
4298                        a_hash->import = chcr_ahash_import;
4299                        a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4300                        a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4301                        a_hash->halg.base.cra_module = THIS_MODULE;
4302                        a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
4303                        a_hash->halg.base.cra_alignmask = 0;
4304                        a_hash->halg.base.cra_exit = NULL;
4305                        a_hash->halg.base.cra_type = &crypto_ahash_type;
4306
4307                        if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4308                                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4309                                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4310                                a_hash->init = chcr_hmac_init;
4311                                a_hash->setkey = chcr_ahash_setkey;
4312                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4313                        } else {
4314                                a_hash->init = chcr_sha_init;
4315                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4316                                a_hash->halg.base.cra_init = chcr_sha_cra_init;
4317                        }
4318                        err = crypto_register_ahash(&driver_algs[i].alg.hash);
4319                        ai = driver_algs[i].alg.hash.halg.base;
4320                        name = ai.cra_driver_name;
4321                        break;
4322                }
4323                if (err) {
4324                        pr_err("%s : Algorithm registration failed\n",
4325                               name);
4326                        goto register_err;
4327                } else {
4328                        driver_algs[i].is_registered = 1;
4329                }
4330        }
4331        return 0;
4332
4333register_err:
4334        chcr_unregister_alg();
4335        return err;
4336}
4337
4338/*
4339 *      start_crypto - Register the crypto algorithms.
4340 *      This should be called once when the first device comes up. After this,
4341 *      the kernel will start calling the driver APIs for crypto operations.
4342 */
4343int start_crypto(void)
4344{
4345        return chcr_register_alg();
4346}
4347
4348/*
4349 *      stop_crypto - Deregister all the crypto algorithms with the kernel.
4350 *      This should be called once when the last device goes down. After this,
4351 *      the kernel will not call the driver APIs for crypto operations.
4352 */
4353int stop_crypto(void)
4354{
4355        chcr_unregister_alg();
4356        return 0;
4357}
4358
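/*
 * Illustrative only: the intended usage from the driver core is to call
 * start_crypto() when the first adapter shows up and stop_crypto() when
 * the last one goes away, so the algorithms are registered exactly once.
 * A minimal sketch with a hypothetical device counter (the names below
 * are placeholders, not the actual chcr_core symbols):
 *
 *	static atomic_t chcr_example_dev_cnt = ATOMIC_INIT(0);
 *
 *	static int chcr_example_dev_add(void)
 *	{
 *		if (atomic_inc_return(&chcr_example_dev_cnt) == 1)
 *			return start_crypto();
 *		return 0;
 *	}
 *
 *	static void chcr_example_dev_remove(void)
 *	{
 *		if (atomic_dec_and_test(&chcr_example_dev_cnt))
 *			stop_crypto();
 *	}
 */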