linux/drivers/crypto/chelsio/chcr_algo.c
/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

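/*
 * Lookup tables mapping a number of scatter/gather entries to the space
 * (in bytes) those entries consume inside a work request: sgl_ent_len
 * for source ULPTX SGLs, dsgl_ent_len for destination PHYS_DSGLs.
 */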
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

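/*
 * AES key-schedule round constants (Rcon), stored in the high byte so
 * they can be XORed directly into the big-endian key words below.
 */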
static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

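/*
 * Count how many SGL entries are needed to describe @reqlen bytes of
 * @sg, skipping the first @skip bytes and splitting each DMA mapping
 * into chunks of at most @entlen bytes.
 */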
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

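/*
 * Verify the authentication tag in software: compare the tag computed
 * by the hardware (carried in the CPL_FW6_PLD response) against the
 * tag supplied with the request, and report -EBADMSG on mismatch.
 */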
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                authsize, req->assoclen +
                                req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}

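/*
 * Expand the AES encryption key and emit the last round-key words in
 * reverse order; the hardware uses this form as the decryption key.
 */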
static void get_aes_decrypt_key(unsigned char *dec_key,
                                       const unsigned char *key,
                                       unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = get_unaligned_be32(&key[i * 4]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

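/*
 * Run one block of @iopad through the base hash and export the
 * resulting internal state; used to precompute the HMAC ipad/opad
 * partial hashes that are programmed into the hardware.
 */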
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

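/* Byte-swap the exported hash state words into big-endian order. */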
static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

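/*
 * dsgl_walk_*: helpers that build the destination PHYS_DSGL
 * (CPL_RX_PHYS_DSGL) of a work request, packing up to eight
 * address/length pairs into each phys_sge_pairs block.
 */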
static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size,
                                      dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                      offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

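/*
 * ulptx_walk_*: helpers that build the source-side ULPTX SGL, with the
 * first buffer in len0/addr0 and subsequent buffers in addr/len pairs.
 */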
static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                              ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size,
                                       dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.skcipher);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

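/*
 * Compute how many source bytes (matched by an equal number of
 * destination bytes) fit into the remaining work-request @space, using
 * the per-entry size tables above; returns the byte count usable for
 * this WR.
 */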
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                                CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}

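/*
 * Hand the request off to the software skcipher implementation; used
 * when the hardware cannot process the request (e.g. zero length, or
 * a CTR counter that would otherwise wrap).
 */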
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                struct skcipher_request *req,
                                u8 *iv,
                                unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int err;

        skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
        skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
                                      req->base.complete, req->base.data);
        skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
                                   req->cryptlen, iv);

        err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
                        crypto_skcipher_encrypt(&reqctx->fallback_req);

        return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
                            unsigned int *txqidx, unsigned int *rxqidx)
{
        struct crypto_tfm *tfm = req->tfm;
        int ret = 0;

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
        {
                struct aead_request *aead_req =
                        container_of(req, struct aead_request, base);
                struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_SKCIPHER:
        {
                struct skcipher_request *sk_req =
                        container_of(req, struct skcipher_request, base);
                struct chcr_skcipher_req_ctx *reqctx =
                        skcipher_request_ctx(sk_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        case CRYPTO_ALG_TYPE_AHASH:
        {
                struct ahash_request *ahash_req =
                        container_of(req, struct ahash_request, base);
                struct chcr_ahash_req_ctx *reqctx =
                        ahash_request_ctx(ahash_req);
                *txqidx = reqctx->txqidx;
                *rxqidx = reqctx->rxqidx;
                break;
        }
        default:
                ret = -EINVAL;
                /* should never get here */
                BUG();
                break;
        }
        return ret;
}

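/*
 * Fill the common FW_CRYPTO_LOOKASIDE_WR header and ULPTX fields shared
 * by cipher, hash and AEAD work requests: queue ids, lengths in 16-byte
 * units, and the completion cookie that points back at @req.
 */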
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        unsigned int tx_channel_id, rx_channel_id;
        unsigned int txqidx = 0, rxqidx = 0;
        unsigned int qid, fid;

        get_qidxs(req, &txqidx, &rxqidx);
        qid = u_ctx->lldi.rxq_ids[rxqidx];
        fid = u_ctx->lldi.rxq_ids[0];
        tx_channel_id = txqidx / ctx->txq_perchan;
        rx_channel_id = rxqidx / ctx->rxq_perchan;

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
                                                            !!lcb, txqidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                ((sizeof(chcr_req->wreq)) >> 4)));
        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: bundled WR parameters: the cipher request, the ingress
 *                qid where the response of this WR should be received,
 *                and the number of bytes to encrypt or decrypt.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
        struct chcr_context *ctx = c_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_skcipher_req_ctx *reqctx =
                skcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(ctx->dev);
        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                                  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                     (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                        FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(tfm) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                        ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->iv, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

        crypto_skcipher_clear_flags(ablkctx->sw_cipher,
                                CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher,
                                cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        ablkctx->enckey_len = 0;

        return err;
}
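
/*
 * Add @add to the 32-bit big-endian counter at the end of @srciv,
 * propagating any carry into the higher-order words of the IV.
 */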
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}

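/*
 * Clamp @bytes so that the low 32-bit counter of @iv does not wrap
 * within a single work request; any remainder is handled by a
 * follow-up request with an updated IV.
 */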
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
        if ((bytes / AES_BLOCK_SIZE) >= c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}

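/*
 * Recompute the XTS tweak for the next chunk: encrypt the original IV
 * with the second half of the key, then multiply by x in GF(2^128)
 * once per block already processed. For non-final chunks the result is
 * decrypted back, so that the hardware, which encrypts the IV itself,
 * regenerates the advanced tweak.
 */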
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_aes_ctx aes;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        /* For a 192 bit key remove the padded zeroes which were
         * added in chcr_xts_setkey
         */
        if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
                        == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
                ret = aes_expandkey(&aes, key, keylen - 8);
        else
                ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;
        aes_encrypt(&aes, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                aes_decrypt(&aes, iv, iv);

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->iv, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in rfc3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes,
 * which remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
                                   struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(tfm);
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
                                                       AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
                if (!reqctx->partial_req)
                        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
                else
                        ret = chcr_update_tweak(req, iv, 1);
        } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
                                   unsigned char *input, int err)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct chcr_context *ctx = c_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);
        struct cipher_wr_param wrparam;
        struct sk_buff *skb;
        int bytes;

        if (err)
                goto unmap;
        if (req->cryptlen == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->cryptlen - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                memcpy(req->iv, reqctx->init_iv, IV);
                atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(tfm) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("%s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        if (get_cryptoalg_subtype(tfm) ==
                CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
                        CRYPTO_TFM_REQ_MAY_SLEEP) {
                complete(&ctx->cbc_aes_aio_done);
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}

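/*
 * Build and validate a cipher work request for @req: map the DMA
 * buffers, decide between immediate data and an SGL, set up the
 * (counter) IV, and fall back to software for cases the hardware
 * cannot handle.
 */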
static int process_cipher(struct skcipher_request *req,
                                  unsigned short qid,
                                  struct sk_buff **skb,
                                  unsigned short op_type)
{
        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct adapter *adap = padap(c_ctx(tfm)->dev);
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;
        int subtype;

        reqctx->processed = 0;
        reqctx->partial_req = 0;
        if (!req->iv)
                goto error;
        subtype = get_cryptoalg_subtype(tfm);
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->cryptlen == 0) ||
            (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
                if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
                         subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                        goto fallback;
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->cryptlen, ivsize);
                goto error;
        }

        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                            AES_MIN_KEY_SIZE +
                                            sizeof(struct cpl_rx_phys_dsgl) +
                                        /* Min dsgl size */
                                            32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->cryptlen,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->cryptlen;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->cryptlen)
                        bytes = req->cryptlen - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->cryptlen;
        }
        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(req->iv, bytes);

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
                                CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
                memcpy(reqctx->init_iv, reqctx->iv, IV);
        } else {
                memcpy(reqctx->iv, req->iv, IV);
                memcpy(reqctx->init_iv, req->iv, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
                err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
                                           subtype ==
                                           CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
                                           reqctx->iv : req->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;
        reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}

1366static int chcr_aes_encrypt(struct skcipher_request *req)
1367{
1368        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1369        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1370        struct chcr_dev *dev = c_ctx(tfm)->dev;
1371        struct sk_buff *skb = NULL;
1372        int err;
1373        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1374        struct chcr_context *ctx = c_ctx(tfm);
1375        unsigned int cpu;
1376
1377        cpu = get_cpu();
1378        reqctx->txqidx = cpu % ctx->ntxq;
1379        reqctx->rxqidx = cpu % ctx->nrxq;
1380        put_cpu();
1381
1382        err = chcr_inc_wrcount(dev);
1383        if (err)
1384                return -ENXIO;
1385        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1386                                                reqctx->txqidx) &&
1387                (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1388                err = -ENOSPC;
1389                goto error;
1390        }
1391
1392        err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1393                             &skb, CHCR_ENCRYPT_OP);
1394        if (err || !skb)
1395                return err;
1396        skb->dev = u_ctx->lldi.ports[0];
1397        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1398        chcr_send_wr(skb);
1399        if (get_cryptoalg_subtype(tfm) ==
1400            CRYPTO_ALG_SUB_TYPE_CBC &&
1401            req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
1402                reqctx->partial_req = 1;
1403                wait_for_completion(&ctx->cbc_aes_aio_done);
1404        }
1405        return -EINPROGRESS;
1406error:
1407        chcr_dec_wrcount(dev);
1408        return err;
1409}
1410
1411static int chcr_aes_decrypt(struct skcipher_request *req)
1412{
1413        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1414        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1415        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1416        struct chcr_dev *dev = c_ctx(tfm)->dev;
1417        struct sk_buff *skb = NULL;
1418        int err;
1419        struct chcr_context *ctx = c_ctx(tfm);
1420        unsigned int cpu;
1421
1422        cpu = get_cpu();
1423        reqctx->txqidx = cpu % ctx->ntxq;
1424        reqctx->rxqidx = cpu % ctx->nrxq;
1425        put_cpu();
1426
1427        err = chcr_inc_wrcount(dev);
1428        if (err)
1429                return -ENXIO;
1430
1431        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1432                                                reqctx->txqidx) &&
1433                (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1434                return -ENOSPC;
1435        err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1436                             &skb, CHCR_DECRYPT_OP);
1437        if (err || !skb)
1438                return err;
1439        skb->dev = u_ctx->lldi.ports[0];
1440        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1441        chcr_send_wr(skb);
1442        return -EINPROGRESS;
1443}
1444static int chcr_device_init(struct chcr_context *ctx)
1445{
1446        struct uld_ctx *u_ctx = NULL;
1447        int txq_perchan, ntxq;
1448        int err = 0, rxq_perchan;
1449
1450        if (!ctx->dev) {
1451                u_ctx = assign_chcr_device();
1452                if (!u_ctx) {
1453                        err = -ENXIO;
1454                        pr_err("chcr device assignment failed\n");
1455                        goto out;
1456                }
1457                ctx->dev = &u_ctx->dev;
1458                ntxq = u_ctx->lldi.ntxq;
1459                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1460                txq_perchan = ntxq / u_ctx->lldi.nchan;
1461                ctx->ntxq = ntxq;
1462                ctx->nrxq = u_ctx->lldi.nrxq;
1463                ctx->rxq_perchan = rxq_perchan;
1464                ctx->txq_perchan = txq_perchan;
1465        }
1466out:
1467        return err;
1468}
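
/*
 * For reference: the request paths above spread work across the queues
 * cached here by reducing the submitting CPU modulo the queue counts.
 * A trivial restatement of that selection (illustrative names only):
 */
static void pick_queue_indices(unsigned int cpu,
			       unsigned int ntxq, unsigned int nrxq,
			       unsigned int *txqidx, unsigned int *rxqidx)
{
	*txqidx = cpu % ntxq;		/* transmit queue for this request */
	*rxqidx = cpu % nrxq;		/* response queue for this request */
}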
1469
1470static int chcr_init_tfm(struct crypto_skcipher *tfm)
1471{
1472        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1473        struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1474        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1475
1476        ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1477                                CRYPTO_ALG_NEED_FALLBACK);
1478        if (IS_ERR(ablkctx->sw_cipher)) {
1479                pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1480                return PTR_ERR(ablkctx->sw_cipher);
1481        }
1482        init_completion(&ctx->cbc_aes_aio_done);
1483        crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1484                                         crypto_skcipher_reqsize(ablkctx->sw_cipher));
1485
1486        return chcr_device_init(ctx);
1487}
1488
1489static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1490{
1491        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1492        struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1493        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1494
1495        /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1496         * cannot be used as the fallback in chcr_handle_cipher_response.
1497         */
1498        ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1499                                CRYPTO_ALG_NEED_FALLBACK);
1500        if (IS_ERR(ablkctx->sw_cipher)) {
1501                pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1502                return PTR_ERR(ablkctx->sw_cipher);
1503        }
1504        crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1505                                    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1506        return chcr_device_init(ctx);
1507}
1508
1510static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1511{
1512        struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1513        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1514
1515        crypto_free_skcipher(ablkctx->sw_cipher);
1516}
1517
1518static int get_alg_config(struct algo_param *params,
1519                          unsigned int auth_size)
1520{
1521        switch (auth_size) {
1522        case SHA1_DIGEST_SIZE:
1523                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1524                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1525                params->result_size = SHA1_DIGEST_SIZE;
1526                break;
1527        case SHA224_DIGEST_SIZE:
1528                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1529                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1530                params->result_size = SHA256_DIGEST_SIZE;
1531                break;
1532        case SHA256_DIGEST_SIZE:
1533                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1534                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1535                params->result_size = SHA256_DIGEST_SIZE;
1536                break;
1537        case SHA384_DIGEST_SIZE:
1538                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1539                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1540                params->result_size = SHA512_DIGEST_SIZE;
1541                break;
1542        case SHA512_DIGEST_SIZE:
1543                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1544                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1545                params->result_size = SHA512_DIGEST_SIZE;
1546                break;
1547        default:
1548                pr_err("unsupported digest size\n");
1549                return -EINVAL;
1550        }
1551        return 0;
1552}
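
/*
 * A note on the mapping above: SHA-224 and SHA-384 are truncations of
 * SHA-256 and SHA-512, so result_size is the parent digest size; the
 * partial hash state carried between work requests is the untruncated
 * one and only the final copy-out is shortened. A hedged restatement:
 */
static unsigned int partial_state_size(unsigned int digest_size)
{
	switch (digest_size) {
	case 28: return 32;	/* SHA-224 carries SHA-256 state */
	case 48: return 64;	/* SHA-384 carries SHA-512 state */
	default: return digest_size;
	}
}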
1553
1554static inline void chcr_free_shash(struct crypto_shash *base_hash)
1555{
1556        crypto_free_shash(base_hash);
1557}
1558
1559/**
1560 *      create_hash_wr - Create hash work request
1561 *      @req: hash request base
1562 */
1563static struct sk_buff *create_hash_wr(struct ahash_request *req,
1564                                      struct hash_wr_param *param)
1565{
1566        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1567        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1568        struct chcr_context *ctx = h_ctx(tfm);
1569        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1570        struct sk_buff *skb = NULL;
1571        struct uld_ctx *u_ctx = ULD_CTX(ctx);
1572        struct chcr_wr *chcr_req;
1573        struct ulptx_sgl *ulptx;
1574        unsigned int nents = 0, transhdr_len;
1575        unsigned int temp = 0;
1576        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1577                GFP_ATOMIC;
1578        struct adapter *adap = padap(h_ctx(tfm)->dev);
1579        int error = 0;
1580        unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1581
1582        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1583        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1584                                param->sg_len) <= SGE_MAX_WR_LEN;
1585        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1586                      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1587        nents += param->bfr_len ? 1 : 0;
1588        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1589                                param->sg_len, 16) : (sgl_len(nents) * 8);
1590        transhdr_len = roundup(transhdr_len, 16);
1591
1592        skb = alloc_skb(transhdr_len, flags);
1593        if (!skb)
1594                return ERR_PTR(-ENOMEM);
1595        chcr_req = __skb_put_zero(skb, transhdr_len);
1596
1597        chcr_req->sec_cpl.op_ivinsrtofst =
1598                FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1599
1600        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1601
1602        chcr_req->sec_cpl.aadstart_cipherstop_hi =
1603                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1604        chcr_req->sec_cpl.cipherstop_lo_authinsert =
1605                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1606        chcr_req->sec_cpl.seqno_numivs =
1607                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1608                                         param->opad_needed, 0);
1609
1610        chcr_req->sec_cpl.ivgen_hdrlen =
1611                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1612
1613        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1614               param->alg_prm.result_size);
1615
1616        if (param->opad_needed)
1617                memcpy(chcr_req->key_ctx.key +
1618                       ((param->alg_prm.result_size <= 32) ? 32 :
1619                        CHCR_HASH_MAX_DIGEST_SIZE),
1620                       hmacctx->opad, param->alg_prm.result_size);
1621
1622        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1623                                            param->alg_prm.mk_size, 0,
1624                                            param->opad_needed,
1625                                            ((param->kctx_len +
1626                                             sizeof(chcr_req->key_ctx)) >> 4));
1627        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1628        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1629                                     DUMMY_BYTES);
1630        if (param->bfr_len != 0) {
1631                req_ctx->hctx_wr.dma_addr =
1632                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1633                                       param->bfr_len, DMA_TO_DEVICE);
1634                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1635                                       req_ctx->hctx_wr.dma_addr)) {
1636                        error = -ENOMEM;
1637                        goto err;
1638                }
1639                req_ctx->hctx_wr.dma_len = param->bfr_len;
1640        } else {
1641                req_ctx->hctx_wr.dma_addr = 0;
1642        }
1643        chcr_add_hash_src_ent(req, ulptx, param);
1644        /* Request up to the max WR size */
1645        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1646                                (param->sg_len + param->bfr_len) : 0);
1647        atomic_inc(&adap->chcr_stats.digest_rqst);
1648        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1649                    param->hash_size, transhdr_len,
1650                    temp, 0);
1651        req_ctx->hctx_wr.skb = skb;
1652        return skb;
1653err:
1654        kfree_skb(skb);
1655        return ERR_PTR(error);
1656}
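
/*
 * For reference: hctx_wr.imm above selects between inlining the payload
 * in the work request and passing a ULPTX SGL. A sketch of the size
 * check; the 512-byte limit stands in for cxgb4's SGE_MAX_WR_LEN and
 * should be treated as an assumption here.
 */
#include <stdbool.h>

#define MAX_WR_LEN	512	/* assumed stand-in for SGE_MAX_WR_LEN */

/* Inline only if header + buffered bytes + SG bytes fit in one WR. */
static bool hash_payload_is_immediate(unsigned int transhdr_len,
				      unsigned int bfr_len,
				      unsigned int sg_len)
{
	return transhdr_len + bfr_len + sg_len <= MAX_WR_LEN;
}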
1657
1658static int chcr_ahash_update(struct ahash_request *req)
1659{
1660        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1661        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1662        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1663        struct chcr_context *ctx = h_ctx(rtfm);
1664        struct chcr_dev *dev = h_ctx(rtfm)->dev;
1665        struct sk_buff *skb;
1666        u8 remainder = 0, bs;
1667        unsigned int nbytes = req->nbytes;
1668        struct hash_wr_param params;
1669        int error;
1670        unsigned int cpu;
1671
1672        cpu = get_cpu();
1673        req_ctx->txqidx = cpu % ctx->ntxq;
1674        req_ctx->rxqidx = cpu % ctx->nrxq;
1675        put_cpu();
1676
1677        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1678
1679        if (nbytes + req_ctx->reqlen >= bs) {
1680                remainder = (nbytes + req_ctx->reqlen) % bs;
1681                nbytes = nbytes + req_ctx->reqlen - remainder;
1682        } else {
1683                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1684                                   + req_ctx->reqlen, nbytes, 0);
1685                req_ctx->reqlen += nbytes;
1686                return 0;
1687        }
1688        error = chcr_inc_wrcount(dev);
1689        if (error)
1690                return -ENXIO;
1691        /* Detach state for CHCR means lldi or padap is freed. Increasing
1692         * the inflight count for dev guarantees that lldi and padap are valid.
1693         */
1694        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1695                                                req_ctx->txqidx) &&
1696                (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1697                error = -ENOSPC;
1698                goto err;
1699        }
1700
1701        chcr_init_hctx_per_wr(req_ctx);
1702        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1703        if (error) {
1704                error = -ENOMEM;
1705                goto err;
1706        }
1707        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1708        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1709        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1710                                     HASH_SPACE_LEFT(params.kctx_len), 0);
1711        if (params.sg_len > req->nbytes)
1712                params.sg_len = req->nbytes;
1713        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1714                        req_ctx->reqlen;
1715        params.opad_needed = 0;
1716        params.more = 1;
1717        params.last = 0;
1718        params.bfr_len = req_ctx->reqlen;
1719        params.scmd1 = 0;
1720        req_ctx->hctx_wr.srcsg = req->src;
1721
1722        params.hash_size = params.alg_prm.result_size;
1723        req_ctx->data_len += params.sg_len + params.bfr_len;
1724        skb = create_hash_wr(req, &params);
1725        if (IS_ERR(skb)) {
1726                error = PTR_ERR(skb);
1727                goto unmap;
1728        }
1729
1730        req_ctx->hctx_wr.processed += params.sg_len;
1731        if (remainder) {
1732                /* Swap buffers */
1733                swap(req_ctx->reqbfr, req_ctx->skbfr);
1734                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1735                                   req_ctx->reqbfr, remainder, req->nbytes -
1736                                   remainder);
1737        }
1738        req_ctx->reqlen = remainder;
1739        skb->dev = u_ctx->lldi.ports[0];
1740        set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1741        chcr_send_wr(skb);
1742        return -EINPROGRESS;
1743unmap:
1744        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1745err:
1746        chcr_dec_wrcount(dev);
1747        return error;
1748}
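
/*
 * The alignment arithmetic at the top of chcr_ahash_update() is the
 * usual streaming-hash pattern: hand the engine whole blocks only and
 * buffer the tail for the next call. A standalone restatement with
 * illustrative names (bs is the hash block size):
 */
static void split_hash_update(unsigned int nbytes, unsigned int buffered,
			      unsigned int bs,
			      unsigned int *to_hw, unsigned int *remainder)
{
	unsigned int total = nbytes + buffered;

	if (total >= bs) {
		*remainder = total % bs;	/* tail, re-buffered */
		*to_hw = total - *remainder;	/* block-aligned chunk */
	} else {
		*to_hw = 0;			/* everything stays buffered */
		*remainder = total;
	}
}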
1749
1750static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1751{
1752        memset(bfr_ptr, 0, bs);
1753        *bfr_ptr = 0x80;
1754        if (bs == 64)
1755                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1756        else
1757                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1758}
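
/*
 * create_last_hash_block() above emits the standard Merkle-Damgård
 * padding block: a 0x80 byte, zeros, then the message length in bits
 * (hence scmd1 << 3) stored big-endian in the last 8 bytes. Offset 56
 * serves 64-byte-block algorithms (SHA-1/224/256), offset 120 the
 * 128-byte-block ones (SHA-384/512). A checkable userspace sketch:
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void last_block(uint8_t *bfr, unsigned int bs, uint64_t msg_bytes)
{
	uint64_t bits = msg_bytes << 3;
	int i;

	memset(bfr, 0, bs);
	bfr[0] = 0x80;				/* padding marker */
	for (i = 0; i < 8; i++)			/* big-endian bit length */
		bfr[bs - 1 - i] = bits >> (8 * i);
}

int main(void)
{
	uint8_t b[64];

	last_block(b, 64, 3);	/* e.g. 3 bytes ("abc") hashed so far */
	assert(b[0] == 0x80 && b[63] == 24);	/* 3 bytes == 24 bits */
	return 0;
}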
1759
1760static int chcr_ahash_final(struct ahash_request *req)
1761{
1762        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1763        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1764        struct chcr_dev *dev = h_ctx(rtfm)->dev;
1765        struct hash_wr_param params;
1766        struct sk_buff *skb;
1767        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1768        struct chcr_context *ctx = h_ctx(rtfm);
1769        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1770        int error;
1771        unsigned int cpu;
1772
1773        cpu = get_cpu();
1774        req_ctx->txqidx = cpu % ctx->ntxq;
1775        req_ctx->rxqidx = cpu % ctx->nrxq;
1776        put_cpu();
1777
1778        error = chcr_inc_wrcount(dev);
1779        if (error)
1780                return -ENXIO;
1781
1782        chcr_init_hctx_per_wr(req_ctx);
1787        params.sg_len = 0;
1788        req_ctx->hctx_wr.isfinal = 1;
1789        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1790        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1791        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1792                params.opad_needed = 1;
1793                params.kctx_len *= 2;
1794        } else {
1795                params.opad_needed = 0;
1796        }
1797
1798        req_ctx->hctx_wr.result = 1;
1799        params.bfr_len = req_ctx->reqlen;
1800        req_ctx->data_len += params.bfr_len + params.sg_len;
1801        req_ctx->hctx_wr.srcsg = req->src;
1802        if (req_ctx->reqlen == 0) {
1803                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1804                params.last = 0;
1805                params.more = 1;
1806                params.scmd1 = 0;
1807                params.bfr_len = bs;
1808
1809        } else {
1810                params.scmd1 = req_ctx->data_len;
1811                params.last = 1;
1812                params.more = 0;
1813        }
1814        params.hash_size = crypto_ahash_digestsize(rtfm);
1815        skb = create_hash_wr(req, &params);
1816        if (IS_ERR(skb)) {
1817                error = PTR_ERR(skb);
1818                goto err;
1819        }
1820        req_ctx->reqlen = 0;
1821        skb->dev = u_ctx->lldi.ports[0];
1822        set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1823        chcr_send_wr(skb);
1824        return -EINPROGRESS;
1825err:
1826        chcr_dec_wrcount(dev);
1827        return error;
1828}
1829
1830static int chcr_ahash_finup(struct ahash_request *req)
1831{
1832        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1833        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1834        struct chcr_dev *dev = h_ctx(rtfm)->dev;
1835        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1836        struct chcr_context *ctx = h_ctx(rtfm);
1837        struct sk_buff *skb;
1838        struct hash_wr_param params;
1839        u8  bs;
1840        int error;
1841        unsigned int cpu;
1842
1843        cpu = get_cpu();
1844        req_ctx->txqidx = cpu % ctx->ntxq;
1845        req_ctx->rxqidx = cpu % ctx->nrxq;
1846        put_cpu();
1847
1848        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1849        error = chcr_inc_wrcount(dev);
1850        if (error)
1851                return -ENXIO;
1852
1853        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1854                                                req_ctx->txqidx) &&
1855                (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1856                error = -ENOSPC;
1857                goto err;
1858        }
1859        chcr_init_hctx_per_wr(req_ctx);
1860        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1861        if (error) {
1862                error = -ENOMEM;
1863                goto err;
1864        }
1865
1866        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1867        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1868        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1869                params.kctx_len *= 2;
1870                params.opad_needed = 1;
1871        } else {
1872                params.opad_needed = 0;
1873        }
1874
1875        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1876                                    HASH_SPACE_LEFT(params.kctx_len), 0);
1877        if (params.sg_len < req->nbytes) {
1878                if (is_hmac(crypto_ahash_tfm(rtfm))) {
1879                        params.kctx_len /= 2;
1880                        params.opad_needed = 0;
1881                }
1882                params.last = 0;
1883                params.more = 1;
1884                params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1885                                        - req_ctx->reqlen;
1886                params.hash_size = params.alg_prm.result_size;
1887                params.scmd1 = 0;
1888        } else {
1889                params.last = 1;
1890                params.more = 0;
1891                params.sg_len = req->nbytes;
1892                params.hash_size = crypto_ahash_digestsize(rtfm);
1893                params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1894                                params.sg_len;
1895        }
1896        params.bfr_len = req_ctx->reqlen;
1897        req_ctx->data_len += params.bfr_len + params.sg_len;
1898        req_ctx->hctx_wr.result = 1;
1899        req_ctx->hctx_wr.srcsg = req->src;
1900        if ((req_ctx->reqlen + req->nbytes) == 0) {
1901                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1902                params.last = 0;
1903                params.more = 1;
1904                params.scmd1 = 0;
1905                params.bfr_len = bs;
1906        }
1907        skb = create_hash_wr(req, &params);
1908        if (IS_ERR(skb)) {
1909                error = PTR_ERR(skb);
1910                goto unmap;
1911        }
1912        req_ctx->reqlen = 0;
1913        req_ctx->hctx_wr.processed += params.sg_len;
1914        skb->dev = u_ctx->lldi.ports[0];
1915        set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1916        chcr_send_wr(skb);
1917        return -EINPROGRESS;
1918unmap:
1919        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1920err:
1921        chcr_dec_wrcount(dev);
1922        return error;
1923}
1924
1925static int chcr_ahash_digest(struct ahash_request *req)
1926{
1927        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1928        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1929        struct chcr_dev *dev = h_ctx(rtfm)->dev;
1930        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1931        struct chcr_context *ctx = h_ctx(rtfm);
1932        struct sk_buff *skb;
1933        struct hash_wr_param params;
1934        u8  bs;
1935        int error;
1936        unsigned int cpu;
1937
1938        cpu = get_cpu();
1939        req_ctx->txqidx = cpu % ctx->ntxq;
1940        req_ctx->rxqidx = cpu % ctx->nrxq;
1941        put_cpu();
1942
1943        rtfm->init(req);
1944        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1945        error = chcr_inc_wrcount(dev);
1946        if (error)
1947                return -ENXIO;
1948
1949        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1950                                                req_ctx->txqidx) &&
1951                (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1952                error = -ENOSPC;
1953                goto err;
1954        }
1955
1956        chcr_init_hctx_per_wr(req_ctx);
1957        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1958        if (error) {
1959                error = -ENOMEM;
1960                goto err;
1961        }
1962
1963        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1964        params.kctx_len = roundup(params.alg_prm.result_size, 16);
1965        if (is_hmac(crypto_ahash_tfm(rtfm))) {
1966                params.kctx_len *= 2;
1967                params.opad_needed = 1;
1968        } else {
1969                params.opad_needed = 0;
1970        }
1971        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1972                                HASH_SPACE_LEFT(params.kctx_len), 0);
1973        if (params.sg_len < req->nbytes) {
1974                if (is_hmac(crypto_ahash_tfm(rtfm))) {
1975                        params.kctx_len /= 2;
1976                        params.opad_needed = 0;
1977                }
1978                params.last = 0;
1979                params.more = 1;
1980                params.scmd1 = 0;
1981                params.sg_len = rounddown(params.sg_len, bs);
1982                params.hash_size = params.alg_prm.result_size;
1983        } else {
1984                params.sg_len = req->nbytes;
1985                params.hash_size = crypto_ahash_digestsize(rtfm);
1986                params.last = 1;
1987                params.more = 0;
1988                params.scmd1 = req->nbytes + req_ctx->data_len;
1989
1990        }
1991        params.bfr_len = 0;
1992        req_ctx->hctx_wr.result = 1;
1993        req_ctx->hctx_wr.srcsg = req->src;
1994        req_ctx->data_len += params.bfr_len + params.sg_len;
1995
1996        if (req->nbytes == 0) {
1997                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1998                params.more = 1;
1999                params.bfr_len = bs;
2000        }
2001
2002        skb = create_hash_wr(req, &params);
2003        if (IS_ERR(skb)) {
2004                error = PTR_ERR(skb);
2005                goto unmap;
2006        }
2007        req_ctx->hctx_wr.processed += params.sg_len;
2008        skb->dev = u_ctx->lldi.ports[0];
2009        set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2010        chcr_send_wr(skb);
2011        return -EINPROGRESS;
2012unmap:
2013        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2014err:
2015        chcr_dec_wrcount(dev);
2016        return error;
2017}
2018
2019static int chcr_ahash_continue(struct ahash_request *req)
2020{
2021        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2022        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2023        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2024        struct chcr_context *ctx = h_ctx(rtfm);
2025        struct uld_ctx *u_ctx = ULD_CTX(ctx);
2026        struct sk_buff *skb;
2027        struct hash_wr_param params;
2028        u8  bs;
2029        int error;
2030        unsigned int cpu;
2031
2032        cpu = get_cpu();
2033        reqctx->txqidx = cpu % ctx->ntxq;
2034        reqctx->rxqidx = cpu % ctx->nrxq;
2035        put_cpu();
2036
2037        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2038        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2039        params.kctx_len = roundup(params.alg_prm.result_size, 16);
2040        if (is_hmac(crypto_ahash_tfm(rtfm))) {
2041                params.kctx_len *= 2;
2042                params.opad_needed = 1;
2043        } else {
2044                params.opad_needed = 0;
2045        }
2046        params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2047                                            HASH_SPACE_LEFT(params.kctx_len),
2048                                            hctx_wr->src_ofst);
2049        if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2050                params.sg_len = req->nbytes - hctx_wr->processed;
2051        if (!hctx_wr->result ||
2052            ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2053                if (is_hmac(crypto_ahash_tfm(rtfm))) {
2054                        params.kctx_len /= 2;
2055                        params.opad_needed = 0;
2056                }
2057                params.last = 0;
2058                params.more = 1;
2059                params.sg_len = rounddown(params.sg_len, bs);
2060                params.hash_size = params.alg_prm.result_size;
2061                params.scmd1 = 0;
2062        } else {
2063                params.last = 1;
2064                params.more = 0;
2065                params.hash_size = crypto_ahash_digestsize(rtfm);
2066                params.scmd1 = reqctx->data_len + params.sg_len;
2067        }
2068        params.bfr_len = 0;
2069        reqctx->data_len += params.sg_len;
2070        skb = create_hash_wr(req, &params);
2071        if (IS_ERR(skb)) {
2072                error = PTR_ERR(skb);
2073                goto err;
2074        }
2075        hctx_wr->processed += params.sg_len;
2076        skb->dev = u_ctx->lldi.ports[0];
2077        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2078        chcr_send_wr(skb);
2079        return 0;
2080err:
2081        return error;
2082}
2083
2084static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2085                                          unsigned char *input,
2086                                          int err)
2087{
2088        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2089        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2090        int digestsize, updated_digestsize;
2091        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2092        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2093        struct chcr_dev *dev = h_ctx(tfm)->dev;
2094
2095        if (input == NULL)
2096                goto out;
2097        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2098        updated_digestsize = digestsize;
2099        if (digestsize == SHA224_DIGEST_SIZE)
2100                updated_digestsize = SHA256_DIGEST_SIZE;
2101        else if (digestsize == SHA384_DIGEST_SIZE)
2102                updated_digestsize = SHA512_DIGEST_SIZE;
2103
2104        if (hctx_wr->dma_addr) {
2105                dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2106                                 hctx_wr->dma_len, DMA_TO_DEVICE);
2107                hctx_wr->dma_addr = 0;
2108        }
2109        if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2110                                 req->nbytes)) {
2111                if (hctx_wr->result == 1) {
2112                        hctx_wr->result = 0;
2113                        memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2114                               digestsize);
2115                } else {
2116                        memcpy(reqctx->partial_hash,
2117                               input + sizeof(struct cpl_fw6_pld),
2118                               updated_digestsize);
2119
2120                }
2121                goto unmap;
2122        }
2123        memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2124               updated_digestsize);
2125
2126        err = chcr_ahash_continue(req);
2127        if (err)
2128                goto unmap;
2129        return;
2130unmap:
2131        if (hctx_wr->is_sg_map)
2132                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2133
2135out:
2136        chcr_dec_wrcount(dev);
2137        req->base.complete(&req->base, err);
2138}
2139
2140/*
2141 *      chcr_handle_resp - dispatch a completed request and unmap its DMA buffers
2142 *      @req: crypto request
2143 */
2144int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2145                         int err)
2146{
2147        struct crypto_tfm *tfm = req->tfm;
2148        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2149        struct adapter *adap = padap(ctx->dev);
2150
2151        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2152        case CRYPTO_ALG_TYPE_AEAD:
2153                err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2154                break;
2155
2156        case CRYPTO_ALG_TYPE_SKCIPHER:
2157                chcr_handle_cipher_resp(skcipher_request_cast(req),
2158                                        input, err);
2159                break;
2160        case CRYPTO_ALG_TYPE_AHASH:
2161                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2162        }
2163        atomic_inc(&adap->chcr_stats.complete);
2164        return err;
2165}
2166static int chcr_ahash_export(struct ahash_request *areq, void *out)
2167{
2168        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2169        struct chcr_ahash_req_ctx *state = out;
2170
2171        state->reqlen = req_ctx->reqlen;
2172        state->data_len = req_ctx->data_len;
2173        memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2174        memcpy(state->partial_hash, req_ctx->partial_hash,
2175               CHCR_HASH_MAX_DIGEST_SIZE);
2176        chcr_init_hctx_per_wr(state);
2177        return 0;
2178}
2179
2180static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2181{
2182        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2184
2185        req_ctx->reqlen = state->reqlen;
2186        req_ctx->data_len = state->data_len;
2187        req_ctx->reqbfr = req_ctx->bfr1;
2188        req_ctx->skbfr = req_ctx->bfr2;
2189        memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2190        memcpy(req_ctx->partial_hash, state->partial_hash,
2191               CHCR_HASH_MAX_DIGEST_SIZE);
2192        chcr_init_hctx_per_wr(req_ctx);
2193        return 0;
2194}
2195
2196static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2197                             unsigned int keylen)
2198{
2199        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2200        unsigned int digestsize = crypto_ahash_digestsize(tfm);
2201        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2202        unsigned int i, err = 0, updated_digestsize;
2203
2204        SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2205
2206        /* Use the key to calculate the ipad and opad. The ipad will be sent
2207         * with the first request's data; the opad will be sent with the final
2208         * hash result. They live in hmacctx->ipad and hmacctx->opad.
2209         */
2210        shash->tfm = hmacctx->base_hash;
2211        if (keylen > bs) {
2212                err = crypto_shash_digest(shash, key, keylen,
2213                                          hmacctx->ipad);
2214                if (err)
2215                        goto out;
2216                keylen = digestsize;
2217        } else {
2218                memcpy(hmacctx->ipad, key, keylen);
2219        }
2220        memset(hmacctx->ipad + keylen, 0, bs - keylen);
2221        memcpy(hmacctx->opad, hmacctx->ipad, bs);
2222
2223        for (i = 0; i < bs / sizeof(int); i++) {
2224                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2225                *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2226        }
2227
2228        updated_digestsize = digestsize;
2229        if (digestsize == SHA224_DIGEST_SIZE)
2230                updated_digestsize = SHA256_DIGEST_SIZE;
2231        else if (digestsize == SHA384_DIGEST_SIZE)
2232                updated_digestsize = SHA512_DIGEST_SIZE;
2233        err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2234                                        hmacctx->ipad, digestsize);
2235        if (err)
2236                goto out;
2237        chcr_change_order(hmacctx->ipad, updated_digestsize);
2238
2239        err = chcr_compute_partial_hash(shash, hmacctx->opad,
2240                                        hmacctx->opad, digestsize);
2241        if (err)
2242                goto out;
2243        chcr_change_order(hmacctx->opad, updated_digestsize);
2244out:
2245        return err;
2246}
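
/*
 * The setkey path above is the RFC 2104 HMAC key schedule: keys longer
 * than the block size are hashed down to digest size, zero-padded to a
 * full block, then XORed with repeated 0x36 (ipad) and 0x5c (opad)
 * bytes; IPAD_DATA/OPAD_DATA pack four such bytes into an int. A
 * standalone sketch with a generic hash callback (assumed signature):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef void (*hash_fn)(const uint8_t *msg, size_t len, uint8_t *digest);

static void hmac_derive_pads(hash_fn H, unsigned int bs, unsigned int ds,
			     const uint8_t *key, size_t keylen,
			     uint8_t *ipad, uint8_t *opad)
{
	unsigned int i;

	if (keylen > bs) {		/* long keys are hashed first */
		H(key, keylen, ipad);
		keylen = ds;
	} else {
		memcpy(ipad, key, keylen);
	}
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}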
2247
2248static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2249                               unsigned int key_len)
2250{
2251        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2252        unsigned short context_size = 0;
2253        int err;
2254
2255        err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2256        if (err)
2257                goto badkey_err;
2258
2259        memcpy(ablkctx->key, key, key_len);
2260        ablkctx->enckey_len = key_len;
2261        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2262        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2263        /* Both XTS keys must be aligned to a 16-byte boundary by padding
2264         * with zeros, so each 24-byte key is padded with 8 zero bytes.
2265         */
2266        if (key_len == 48) {
2267                context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2268                                + 16) >> 4;
2269                memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2270                memset(ablkctx->key + 24, 0, 8);
2271                memset(ablkctx->key + 56, 0, 8);
2272                ablkctx->enckey_len = 64;
2273                ablkctx->key_ctx_hdr =
2274                        FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2275                                         CHCR_KEYCTX_NO_KEY, 1,
2276                                         0, context_size);
2277        } else {
2278                ablkctx->key_ctx_hdr =
2279                FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2280                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2281                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2282                                 CHCR_KEYCTX_NO_KEY, 1,
2283                                 0, context_size);
2284        }
2285        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2286        return 0;
2287badkey_err:
2288        ablkctx->enckey_len = 0;
2289
2290        return err;
2291}
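
/*
 * The key_len == 48 branch above handles XTS with two AES-192 keys:
 * each 24-byte half must start on a 16-byte boundary, so both halves
 * are padded with 8 zero bytes, growing the key material to 64 bytes:
 *
 *   before: [ K1 (24B) | K2 (24B) ]
 *   after:  [ K1 (24B) | 00 (8B) | K2 (24B) | 00 (8B) ]
 *
 * A direct restatement of the memmove/memset sequence (sketch only):
 */
#include <stdint.h>
#include <string.h>

static void pad_xts_aes192_key(uint8_t key[64])	/* first 48 bytes valid */
{
	memmove(key + 32, key + 24, 24);	/* slide K2 up by 8 bytes */
	memset(key + 24, 0, 8);			/* zero-pad after K1 */
	memset(key + 56, 0, 8);			/* zero-pad after K2 */
}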
2292
2293static int chcr_sha_init(struct ahash_request *areq)
2294{
2295        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2296        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2297        int digestsize =  crypto_ahash_digestsize(tfm);
2298
2299        req_ctx->data_len = 0;
2300        req_ctx->reqlen = 0;
2301        req_ctx->reqbfr = req_ctx->bfr1;
2302        req_ctx->skbfr = req_ctx->bfr2;
2303        copy_hash_init_values(req_ctx->partial_hash, digestsize);
2304
2305        return 0;
2306}
2307
2308static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2309{
2310        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2311                                 sizeof(struct chcr_ahash_req_ctx));
2312        return chcr_device_init(crypto_tfm_ctx(tfm));
2313}
2314
2315static int chcr_hmac_init(struct ahash_request *areq)
2316{
2317        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2318        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2319        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2320        unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2321        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2322
2323        chcr_sha_init(areq);
2324        req_ctx->data_len = bs;
2325        if (is_hmac(crypto_ahash_tfm(rtfm))) {
2326                if (digestsize == SHA224_DIGEST_SIZE)
2327                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2328                               SHA256_DIGEST_SIZE);
2329                else if (digestsize == SHA384_DIGEST_SIZE)
2330                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2331                               SHA512_DIGEST_SIZE);
2332                else
2333                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334                               digestsize);
2335        }
2336        return 0;
2337}
2338
2339static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2340{
2341        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2342        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2343        unsigned int digestsize =
2344                crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2345
2346        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2347                                 sizeof(struct chcr_ahash_req_ctx));
2348        hmacctx->base_hash = chcr_alloc_shash(digestsize);
2349        if (IS_ERR(hmacctx->base_hash))
2350                return PTR_ERR(hmacctx->base_hash);
2351        return chcr_device_init(crypto_tfm_ctx(tfm));
2352}
2353
2354static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2355{
2356        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2357        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2358
2359        if (hmacctx->base_hash) {
2360                chcr_free_shash(hmacctx->base_hash);
2361                hmacctx->base_hash = NULL;
2362        }
2363}
2364
2365inline void chcr_aead_common_exit(struct aead_request *req)
2366{
2367        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2368        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2369        struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2370
2371        chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2372}
2373
2374static int chcr_aead_common_init(struct aead_request *req)
2375{
2376        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2377        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2378        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2379        unsigned int authsize = crypto_aead_authsize(tfm);
2380        int error = -EINVAL;
2381
2382        /* validate key size */
2383        if (aeadctx->enckey_len == 0)
2384                goto err;
2385        if (reqctx->op && req->cryptlen < authsize)
2386                goto err;
2387        if (reqctx->b0_len)
2388                reqctx->scratch_pad = reqctx->iv + IV;
2389        else
2390                reqctx->scratch_pad = NULL;
2391
2392        error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2393                                  reqctx->op);
2394        if (error) {
2395                error = -ENOMEM;
2396                goto err;
2397        }
2398
2399        return 0;
2400err:
2401        return error;
2402}
2403
2404static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2405                                   int aadmax, int wrlen,
2406                                   unsigned short op_type)
2407{
2408        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2409
2410        if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2411            dst_nents > MAX_DSGL_ENT ||
2412            (req->assoclen > aadmax) ||
2413            (wrlen > SGE_MAX_WR_LEN))
2414                return 1;
2415        return 0;
2416}
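
/*
 * Restated: offload is refused when there is no cipher payload, the
 * destination needs more DSGL entries than the hardware supports, the
 * AAD exceeds the per-request limit, or the work request would not fit
 * in a single SGE WR. A sketch with the limits passed in; the driver's
 * MAX_DSGL_ENT, aadmax and SGE_MAX_WR_LEN values are assumed inputs.
 */
static int aead_needs_fallback(unsigned int payload, int dst_nents,
			       int max_dsgl, unsigned int assoclen,
			       unsigned int aadmax, int wrlen, int max_wr)
{
	return payload == 0 || dst_nents > max_dsgl ||
	       assoclen > aadmax || wrlen > max_wr;
}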
2417
2418static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2419{
2420        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2421        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2422        struct aead_request *subreq = aead_request_ctx(req);
2423
2424        aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2425        aead_request_set_callback(subreq, req->base.flags,
2426                                  req->base.complete, req->base.data);
2427        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2428                                 req->iv);
2429        aead_request_set_ad(subreq, req->assoclen);
2430        return op_type ? crypto_aead_decrypt(subreq) :
2431                crypto_aead_encrypt(subreq);
2432}
2433
2434static struct sk_buff *create_authenc_wr(struct aead_request *req,
2435                                         unsigned short qid,
2436                                         int size)
2437{
2438        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2439        struct chcr_context *ctx = a_ctx(tfm);
2440        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2441        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2442        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2443        struct sk_buff *skb = NULL;
2444        struct chcr_wr *chcr_req;
2445        struct cpl_rx_phys_dsgl *phys_cpl;
2446        struct ulptx_sgl *ulptx;
2447        unsigned int transhdr_len;
2448        unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2449        unsigned int   kctx_len = 0, dnents, snents;
2450        unsigned int  authsize = crypto_aead_authsize(tfm);
2451        int error = -EINVAL;
2452        u8 *ivptr;
2453        int null = 0;
2454        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2455                GFP_ATOMIC;
2456        struct adapter *adap = padap(ctx->dev);
2457        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2458
2459        if (req->cryptlen == 0)
2460                return NULL;
2461
2462        reqctx->b0_len = 0;
2463        error = chcr_aead_common_init(req);
2464        if (error)
2465                return ERR_PTR(error);
2466
2467        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469                null = 1;
2470        }
2471        dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472                (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473        dnents += MIN_AUTH_SG;  /* for IV */
2474        snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475                               CHCR_SRC_SG_SIZE, 0);
2476        dst_size = get_space_for_phys_dsgl(dnents);
2477        kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478                - sizeof(chcr_req->key_ctx);
2479        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480        reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481                        SGE_MAX_WR_LEN;
2482        temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483                        : (sgl_len(snents) * 8);
2484        transhdr_len += temp;
2485        transhdr_len = roundup(transhdr_len, 16);
2486
2487        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488                                    transhdr_len, reqctx->op)) {
2489                atomic_inc(&adap->chcr_stats.fallback);
2490                chcr_aead_common_exit(req);
2491                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492        }
2493        skb = alloc_skb(transhdr_len, flags);
2494        if (!skb) {
2495                error = -ENOMEM;
2496                goto err;
2497        }
2498
2499        chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501        temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503        /*
2504         * Input order is AAD, IV and payload, where the IV should be included
2505         * as part of the authdata. All other fields should be filled according
2506         * to the hardware spec.
2507         */
2508        chcr_req->sec_cpl.op_ivinsrtofst =
2509                                FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510        chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512                                        null ? 0 : 1 + IV,
2513                                        null ? 0 : IV + req->assoclen,
2514                                        req->assoclen + IV + 1,
2515                                        (temp & 0x1F0) >> 4);
2516        chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517                                        temp & 0xF,
2518                                        null ? 0 : req->assoclen + IV + 1,
2519                                        temp, temp);
2520        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521            subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522                temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523        else
2524                temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526                                        (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527                                        temp,
2528                                        actx->auth_mode, aeadctx->hmac_ctrl,
2529                                        IV >> 1);
2530        chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531                                         0, 0, dst_size);
2532
2533        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534        if (reqctx->op == CHCR_ENCRYPT_OP ||
2535                subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537                memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538                       aeadctx->enckey_len);
2539        else
2540                memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541                       aeadctx->enckey_len);
2542
2543        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544               actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546        ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547        ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550                memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551                memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552                                CTR_RFC3686_IV_SIZE);
2553                *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555        } else {
2556                memcpy(ivptr, req->iv, IV);
2557        }
2558        chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559        chcr_add_aead_src_ent(req, ulptx);
2560        atomic_inc(&adap->chcr_stats.cipher_rqst);
2561        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562                kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564                   transhdr_len, temp, 0);
2565        reqctx->skb = skb;
2566
2567        return skb;
2568err:
2569        chcr_aead_common_exit(req);
2570
2571        return ERR_PTR(error);
2572}
2573
2574int chcr_aead_dma_map(struct device *dev,
2575                      struct aead_request *req,
2576                      unsigned short op_type)
2577{
2578        int error;
2579        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2580        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581        unsigned int authsize = crypto_aead_authsize(tfm);
2582        int src_len, dst_len;
2583
2584        /* Calculate and handle the src and dst sg lengths separately
2585         * for in-place and out-of-place operations.
2586         */
2587        if (req->src == req->dst) {
2588                src_len = req->assoclen + req->cryptlen + (op_type ?
2589                                                        0 : authsize);
2590                dst_len = src_len;
2591        } else {
2592                src_len = req->assoclen + req->cryptlen;
2593                dst_len = req->assoclen + req->cryptlen + (op_type ?
2594                                                        -authsize : authsize);
2595        }
2596
2597        if (!req->cryptlen || !src_len || !dst_len)
2598                return 0;
2599        reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600                                        DMA_BIDIRECTIONAL);
2601        if (dma_mapping_error(dev, reqctx->iv_dma))
2602                return -ENOMEM;
2603        if (reqctx->b0_len)
2604                reqctx->b0_dma = reqctx->iv_dma + IV;
2605        else
2606                reqctx->b0_dma = 0;
2607        if (req->src == req->dst) {
2608                error = dma_map_sg(dev, req->src,
2609                                sg_nents_for_len(req->src, src_len),
2610                                        DMA_BIDIRECTIONAL);
2611                if (!error)
2612                        goto err;
2613        } else {
2614                error = dma_map_sg(dev, req->src,
2615                                   sg_nents_for_len(req->src, src_len),
2616                                   DMA_TO_DEVICE);
2617                if (!error)
2618                        goto err;
2619                error = dma_map_sg(dev, req->dst,
2620                                   sg_nents_for_len(req->dst, dst_len),
2621                                   DMA_FROM_DEVICE);
2622                if (!error) {
2623                        dma_unmap_sg(dev, req->src,
2624                                     sg_nents_for_len(req->src, src_len),
2625                                     DMA_TO_DEVICE);
2626                        goto err;
2627                }
2628        }
2629
2630        return 0;
2631err:
2632        dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL);
2633        return -ENOMEM;
2634}
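
/*
 * The length accounting above follows the usual AEAD convention: on
 * decrypt (op_type set) the authentication tag is part of the source
 * and absent from the destination; on encrypt it is appended to the
 * destination. In-place requests map one list DMA_BIDIRECTIONAL,
 * out-of-place maps src TO_DEVICE and dst FROM_DEVICE. A sketch of the
 * length rule only (illustrative names):
 */
static void aead_sg_lengths(unsigned int assoclen, unsigned int cryptlen,
			    unsigned int authsize, int decrypt, int in_place,
			    int *src_len, int *dst_len)
{
	if (in_place) {
		*src_len = assoclen + cryptlen + (decrypt ? 0 : authsize);
		*dst_len = *src_len;
	} else {
		*src_len = assoclen + cryptlen;
		*dst_len = assoclen + cryptlen +
			   (decrypt ? -(int)authsize : (int)authsize);
	}
}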
2635
2636void chcr_aead_dma_unmap(struct device *dev,
2637                         struct aead_request *req,
2638                         unsigned short op_type)
2639{
2640        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2641        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642        unsigned int authsize = crypto_aead_authsize(tfm);
2643        int src_len, dst_len;
2644
2645        /* Calculate and handle the src and dst sg lengths separately
2646         * for in-place and out-of-place operations.
2647         */
2648        if (req->src == req->dst) {
2649                src_len = req->assoclen + req->cryptlen + (op_type ?
2650                                                        0 : authsize);
2651                dst_len = src_len;
2652        } else {
2653                src_len = req->assoclen + req->cryptlen;
2654                dst_len = req->assoclen + req->cryptlen + (op_type ?
2655                                                -authsize : authsize);
2656        }
2657
2658        if (!req->cryptlen || !src_len || !dst_len)
2659                return;
2660
2661        dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662                                        DMA_BIDIRECTIONAL);
2663        if (req->src == req->dst) {
2664                dma_unmap_sg(dev, req->src,
2665                             sg_nents_for_len(req->src, src_len),
2666                             DMA_BIDIRECTIONAL);
2667        } else {
2668                dma_unmap_sg(dev, req->src,
2669                             sg_nents_for_len(req->src, src_len),
2670                             DMA_TO_DEVICE);
2671                dma_unmap_sg(dev, req->dst,
2672                             sg_nents_for_len(req->dst, dst_len),
2673                             DMA_FROM_DEVICE);
2674        }
2675}
2676
2677void chcr_add_aead_src_ent(struct aead_request *req,
2678                           struct ulptx_sgl *ulptx)
2679{
2680        struct ulptx_walk ulp_walk;
2681        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2682
2683        if (reqctx->imm) {
2684                u8 *buf = (u8 *)ulptx;
2685
2686                if (reqctx->b0_len) {
2687                        memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688                        buf += reqctx->b0_len;
2689                }
2690                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691                                   buf, req->cryptlen + req->assoclen, 0);
2692        } else {
2693                ulptx_walk_init(&ulp_walk, ulptx);
2694                if (reqctx->b0_len)
2695                        ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696                                            reqctx->b0_dma);
2697                ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698                                  req->assoclen,  0);
2699                ulptx_walk_end(&ulp_walk);
2700        }
2701}
2702
2703void chcr_add_aead_dst_ent(struct aead_request *req,
2704                           struct cpl_rx_phys_dsgl *phys_cpl,
2705                           unsigned short qid)
2706{
2707        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2708        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709        struct dsgl_walk dsgl_walk;
2710        unsigned int authsize = crypto_aead_authsize(tfm);
2711        struct chcr_context *ctx = a_ctx(tfm);
2712        u32 temp;
2713        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2714
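        /* Destination DSGL: one driver-owned page entry for the IV
         * (plus B0 for CCM) followed by the dst scatterlist. The tag
         * is appended on encrypt and stripped on decrypt, hence the
         * +/-authsize below.
         */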
2715        dsgl_walk_init(&dsgl_walk, phys_cpl);
2716        dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2717        temp = req->assoclen + req->cryptlen +
2718                (reqctx->op ? -authsize : authsize);
2719        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2720        dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2721}
2722
2723void chcr_add_cipher_src_ent(struct skcipher_request *req,
2724                             void *ulptx,
2725                             struct  cipher_wr_param *wrparam)
2726{
2727        struct ulptx_walk ulp_walk;
2728        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2729        u8 *buf = ulptx;
2730
2731        memcpy(buf, reqctx->iv, IV);
2732        buf += IV;
2733        if (reqctx->imm) {
2734                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2735                                   buf, wrparam->bytes, reqctx->processed);
2736        } else {
2737                ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2738                ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2739                                  reqctx->src_ofst);
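                /* Remember where the walk stopped so a follow-up WR
                 * for the same request can resume from this
                 * scatterlist position.
                 */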
2740                reqctx->srcsg = ulp_walk.last_sg;
2741                reqctx->src_ofst = ulp_walk.last_sg_len;
2742                ulptx_walk_end(&ulp_walk);
2743        }
2744}
2745
2746void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2747                             struct cpl_rx_phys_dsgl *phys_cpl,
2748                             struct  cipher_wr_param *wrparam,
2749                             unsigned short qid)
2750{
2751        struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2752        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2753        struct chcr_context *ctx = c_ctx(tfm);
2754        struct dsgl_walk dsgl_walk;
2755        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2756
2757        dsgl_walk_init(&dsgl_walk, phys_cpl);
2758        dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2759                         reqctx->dst_ofst);
2760        reqctx->dstsg = dsgl_walk.last_sg;
2761        reqctx->dst_ofst = dsgl_walk.last_sg_len;
2762        dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2763}
2764
2765void chcr_add_hash_src_ent(struct ahash_request *req,
2766                           struct ulptx_sgl *ulptx,
2767                           struct hash_wr_param *param)
2768{
2769        struct ulptx_walk ulp_walk;
2770        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2771
2772        if (reqctx->hctx_wr.imm) {
2773                u8 *buf = (u8 *)ulptx;
2774
2775                if (param->bfr_len) {
2776                        memcpy(buf, reqctx->reqbfr, param->bfr_len);
2777                        buf += param->bfr_len;
2778                }
2779
2780                sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2781                                   sg_nents(reqctx->hctx_wr.srcsg), buf,
2782                                   param->sg_len, 0);
2783        } else {
2784                ulptx_walk_init(&ulp_walk, ulptx);
2785                if (param->bfr_len)
2786                        ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2787                                            reqctx->hctx_wr.dma_addr);
2788                ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2789                                  param->sg_len, reqctx->hctx_wr.src_ofst);
2790                reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2791                reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2792                ulptx_walk_end(&ulp_walk);
2793        }
2794}
2795
2796int chcr_hash_dma_map(struct device *dev,
2797                      struct ahash_request *req)
2798{
2799        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2800        int error = 0;
2801
2802        if (!req->nbytes)
2803                return 0;
2804        error = dma_map_sg(dev, req->src, sg_nents(req->src),
2805                           DMA_TO_DEVICE);
2806        if (!error)
2807                return -ENOMEM;
2808        req_ctx->hctx_wr.is_sg_map = 1;
2809        return 0;
2810}
2811
2812void chcr_hash_dma_unmap(struct device *dev,
2813                         struct ahash_request *req)
2814{
2815        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2816
2817        if (!req->nbytes)
2818                return;
2819
        dma_unmap_sg(dev, req->src, sg_nents(req->src),
                     DMA_TO_DEVICE);
        req_ctx->hctx_wr.is_sg_map = 0;
2824}
2825
2826int chcr_cipher_dma_map(struct device *dev,
2827                        struct skcipher_request *req)
2828{
2829        int error;
2830
2831        if (req->src == req->dst) {
2832                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2833                                   DMA_BIDIRECTIONAL);
2834                if (!error)
2835                        goto err;
2836        } else {
2837                error = dma_map_sg(dev, req->src, sg_nents(req->src),
2838                                   DMA_TO_DEVICE);
2839                if (!error)
2840                        goto err;
2841                error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2842                                   DMA_FROM_DEVICE);
2843                if (!error) {
2844                        dma_unmap_sg(dev, req->src, sg_nents(req->src),
2845                                   DMA_TO_DEVICE);
2846                        goto err;
2847                }
2848        }
2849
2850        return 0;
2851err:
2852        return -ENOMEM;
2853}
2854
2855void chcr_cipher_dma_unmap(struct device *dev,
2856                           struct skcipher_request *req)
2857{
2858        if (req->src == req->dst) {
2859                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2860                                   DMA_BIDIRECTIONAL);
2861        } else {
2862                dma_unmap_sg(dev, req->src, sg_nents(req->src),
2863                                   DMA_TO_DEVICE);
2864                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2865                                   DMA_FROM_DEVICE);
2866        }
2867}
2868
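/*
 * Write msglen as a big-endian integer into the last csize bytes of
 * the CCM length field at block, zeroing the field first: e.g.
 * msglen = 0x1234 with csize = 3 stores 00 12 34. Fields wider than
 * four bytes keep their upper bytes zero; for narrower fields a
 * msglen that does not fit yields -EOVERFLOW.
 */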
2869static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2870{
2871        __be32 data;
2872
2873        memset(block, 0, csize);
2874        block += csize;
2875
2876        if (csize >= 4)
2877                csize = 4;
2878        else if (msglen > (unsigned int)(1 << (8 * csize)))
2879                return -EOVERFLOW;
2880
2881        data = cpu_to_be32(msglen);
2882        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2883
2884        return 0;
2885}
2886
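/*
 * Build the CCM B0 block in reqctx->scratch_pad. Per RFC 3610 the
 * 16-byte layout is
 *
 *      octet 0            flags = 64*Adata | 8*((M-2)/2) | (L-1)
 *      octets 1 .. 15-L   nonce N
 *      octets 16-L .. 15  l(m), the message length, big-endian
 *
 * where M is the tag size and L the width of the length field. The
 * copied IV already carries L-1 in octet 0, so only the tag-size and
 * Adata bits are OR-ed in below.
 */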
2887static int generate_b0(struct aead_request *req, u8 *ivptr,
2888                        unsigned short op_type)
2889{
2890        unsigned int l, lp, m;
2891        int rc;
2892        struct crypto_aead *aead = crypto_aead_reqtfm(req);
2893        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2894        u8 *b0 = reqctx->scratch_pad;
2895
2896        m = crypto_aead_authsize(aead);
2897
2898        memcpy(b0, ivptr, 16);
2899
2900        lp = b0[0];
2901        l = lp + 1;
2902
2903        /* set m, bits 3-5 */
2904        *b0 |= (8 * ((m - 2) / 2));
2905
2906        /* set adata, bit 6, if associated data is used */
2907        if (req->assoclen)
2908                *b0 |= 64;
2909        rc = set_msg_len(b0 + 16 - l,
2910                         (op_type == CHCR_DECRYPT_OP) ?
2911                         req->cryptlen - m : req->cryptlen, l);
2912
2913        return rc;
2914}
2915
2916static inline int crypto_ccm_check_iv(const u8 *iv)
2917{
2918        /* 2 <= L <= 8, so 1 <= L' <= 7. */
2919        if (iv[0] < 1 || iv[0] > 7)
2920                return -EINVAL;
2921
2922        return 0;
2923}
2924
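/*
 * Lay out the 16-byte counter block. For RFC 4309 (IPsec) the nonce is
 * built here: a flags octet of 3 (L' = 3, so a 4-byte counter), the
 * 3-byte salt saved at setkey time, the 8-byte per-request IV, then a
 * zeroed counter. The big-endian AAD length field, when AAD is
 * present, lands in scratch_pad immediately after the B0 block.
 */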
2925static int ccm_format_packet(struct aead_request *req,
2926                             u8 *ivptr,
2927                             unsigned int sub_type,
2928                             unsigned short op_type,
2929                             unsigned int assoclen)
2930{
2931        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2932        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2933        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2934        int rc = 0;
2935
2936        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2937                ivptr[0] = 3;
2938                memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2939                memcpy(ivptr + 4, req->iv, 8);
2940                memset(ivptr + 12, 0, 4);
2941        } else {
2942                memcpy(ivptr, req->iv, 16);
2943        }
2944        if (assoclen)
2945                put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2946
2947        rc = generate_b0(req, ivptr, op_type);
2948        /* zero the ctr value */
2949        memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2950        return rc;
2951}
2952
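/*
 * Fill the SEC CPL for CCM. ccm_xtra counts the bytes the hardware
 * consumes beyond the usual AAD + payload: the B0 block plus, when
 * AAD is present, the AAD length field.
 */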
2953static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2954                                  unsigned int dst_size,
2955                                  struct aead_request *req,
2956                                  unsigned short op_type)
2957{
2958        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2959        struct chcr_context *ctx = a_ctx(tfm);
2960        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2961        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2962        unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2963        unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2964        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2965        unsigned int ccm_xtra;
2966        unsigned int tag_offset = 0, auth_offset = 0;
2967        unsigned int assoclen;
2968
2969        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2970                assoclen = req->assoclen - 8;
2971        else
2972                assoclen = req->assoclen;
2973        ccm_xtra = CCM_B0_SIZE +
2974                ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2975
2976        auth_offset = req->cryptlen ?
2977                (req->assoclen + IV + 1 + ccm_xtra) : 0;
2978        if (op_type == CHCR_DECRYPT_OP) {
2979                if (crypto_aead_authsize(tfm) != req->cryptlen)
2980                        tag_offset = crypto_aead_authsize(tfm);
2981                else
2982                        auth_offset = 0;
2983        }
2984
2985        sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2986        sec_cpl->pldlen =
2987                htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
        /* For CCM there will always be a B0 block, so the AAD start is
         * always 1.
         */
2989        sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2990                                1 + IV, IV + assoclen + ccm_xtra,
2991                                req->assoclen + IV + 1 + ccm_xtra, 0);
2992
2993        sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2994                                        auth_offset, tag_offset,
2995                                        (op_type == CHCR_ENCRYPT_OP) ? 0 :
2996                                        crypto_aead_authsize(tfm));
2997        sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2998                                        (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2999                                        cipher_mode, mac_mode,
3000                                        aeadctx->hmac_ctrl, IV >> 1);
3001
3002        sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3003                                        0, dst_size);
3004}
3005
3006static int aead_ccm_validate_input(unsigned short op_type,
3007                                   struct aead_request *req,
3008                                   struct chcr_aead_ctx *aeadctx,
3009                                   unsigned int sub_type)
3010{
3011        if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3012                if (crypto_ccm_check_iv(req->iv)) {
3013                        pr_err("CCM: IV check fails\n");
3014                        return -EINVAL;
3015                }
3016        } else {
3017                if (req->assoclen != 16 && req->assoclen != 20) {
3018                        pr_err("RFC4309: Invalid AAD length %d\n",
3019                               req->assoclen);
3020                        return -EINVAL;
3021                }
3022        }
3023        return 0;
3024}
3025
3026static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3027                                          unsigned short qid,
3028                                          int size)
3029{
3030        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3031        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3032        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3033        struct sk_buff *skb = NULL;
3034        struct chcr_wr *chcr_req;
3035        struct cpl_rx_phys_dsgl *phys_cpl;
3036        struct ulptx_sgl *ulptx;
3037        unsigned int transhdr_len;
3038        unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3039        unsigned int sub_type, assoclen = req->assoclen;
3040        unsigned int authsize = crypto_aead_authsize(tfm);
3041        int error = -EINVAL;
3042        u8 *ivptr;
3043        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3044                GFP_ATOMIC;
3045        struct adapter *adap = padap(a_ctx(tfm)->dev);
3046
3047        sub_type = get_aead_subtype(tfm);
3048        if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3049                assoclen -= 8;
3050        reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3051        error = chcr_aead_common_init(req);
3052        if (error)
3053                return ERR_PTR(error);
3054
3055        error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3056        if (error)
3057                goto err;
3058        dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3059                        + (reqctx->op ? -authsize : authsize),
3060                        CHCR_DST_SG_SIZE, 0);
3061        dnents += MIN_CCM_SG; // For IV and B0
3062        dst_size = get_space_for_phys_dsgl(dnents);
3063        snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3064                               CHCR_SRC_SG_SIZE, 0);
        snents += MIN_CCM_SG; // For B0
3066        kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3067        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3068        reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3069                       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3070        temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3071                                     reqctx->b0_len, 16) :
3072                (sgl_len(snents) *  8);
3073        transhdr_len += temp;
3074        transhdr_len = roundup(transhdr_len, 16);
3075
3076        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3077                                reqctx->b0_len, transhdr_len, reqctx->op)) {
3078                atomic_inc(&adap->chcr_stats.fallback);
3079                chcr_aead_common_exit(req);
3080                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3081        }
        skb = alloc_skb(transhdr_len, flags);
3083
3084        if (!skb) {
3085                error = -ENOMEM;
3086                goto err;
3087        }
3088
3089        chcr_req = __skb_put_zero(skb, transhdr_len);
3090
3091        fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3092
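        /* The key context is sized for two keys (kctx_len is twice the
         * rounded key length) and both slots receive the same AES key,
         * matching CCM's use of a single key for both the CTR cipher
         * and the CBC-MAC.
         */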
3093        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3094        memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3095        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3096                        aeadctx->key, aeadctx->enckey_len);
3097
3098        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3099        ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3100        ulptx = (struct ulptx_sgl *)(ivptr + IV);
3101        error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3102        if (error)
3103                goto dstmap_fail;
3104        chcr_add_aead_dst_ent(req, phys_cpl, qid);
3105        chcr_add_aead_src_ent(req, ulptx);
3106
3107        atomic_inc(&adap->chcr_stats.aead_rqst);
3108        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3109                kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3110                reqctx->b0_len) : 0);
3111        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3112                    transhdr_len, temp, 0);
3113        reqctx->skb = skb;
3114
3115        return skb;
3116dstmap_fail:
3117        kfree_skb(skb);
3118err:
3119        chcr_aead_common_exit(req);
3120        return ERR_PTR(error);
3121}
3122
3123static struct sk_buff *create_gcm_wr(struct aead_request *req,
3124                                     unsigned short qid,
3125                                     int size)
3126{
3127        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3128        struct chcr_context *ctx = a_ctx(tfm);
3129        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3130        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3131        struct sk_buff *skb = NULL;
3132        struct chcr_wr *chcr_req;
3133        struct cpl_rx_phys_dsgl *phys_cpl;
3134        struct ulptx_sgl *ulptx;
3135        unsigned int transhdr_len, dnents = 0, snents;
3136        unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3137        unsigned int authsize = crypto_aead_authsize(tfm);
3138        int error = -EINVAL;
3139        u8 *ivptr;
3140        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3141                GFP_ATOMIC;
3142        struct adapter *adap = padap(ctx->dev);
3143        unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3144
3145        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3146                assoclen = req->assoclen - 8;
3147
3148        reqctx->b0_len = 0;
3149        error = chcr_aead_common_init(req);
3150        if (error)
3151                return ERR_PTR(error);
3152        dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3153                                (reqctx->op ? -authsize : authsize),
3154                                CHCR_DST_SG_SIZE, 0);
3155        snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3156                               CHCR_SRC_SG_SIZE, 0);
3157        dnents += MIN_GCM_SG; // For IV
3158        dst_size = get_space_for_phys_dsgl(dnents);
3159        kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3160        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3161        reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3162                        SGE_MAX_WR_LEN;
3163        temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3164                (sgl_len(snents) * 8);
3165        transhdr_len += temp;
3166        transhdr_len = roundup(transhdr_len, 16);
        if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
                                    transhdr_len, reqctx->op)) {
                atomic_inc(&adap->chcr_stats.fallback);
3171                chcr_aead_common_exit(req);
3172                return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3173        }
3174        skb = alloc_skb(transhdr_len, flags);
3175        if (!skb) {
3176                error = -ENOMEM;
3177                goto err;
3178        }
3179
3180        chcr_req = __skb_put_zero(skb, transhdr_len);
3181
        // Offset of the tag from the end
3183        temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3184        chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3185                                                rx_channel_id, 2, 1);
3186        chcr_req->sec_cpl.pldlen =
3187                htonl(req->assoclen + IV + req->cryptlen);
3188        chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3189                                        assoclen ? 1 + IV : 0,
3190                                        assoclen ? IV + assoclen : 0,
3191                                        req->assoclen + IV + 1, 0);
3192        chcr_req->sec_cpl.cipherstop_lo_authinsert =
3193                        FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3194                                                temp, temp);
3195        chcr_req->sec_cpl.seqno_numivs =
3196                        FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3197                                        CHCR_ENCRYPT_OP) ? 1 : 0,
3198                                        CHCR_SCMD_CIPHER_MODE_AES_GCM,
3199                                        CHCR_SCMD_AUTH_MODE_GHASH,
3200                                        aeadctx->hmac_ctrl, IV >> 1);
3201        chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3202                                        0, 0, dst_size);
3203        chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3204        memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3205        memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3206               GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3207
3208        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3209        ivptr = (u8 *)(phys_cpl + 1) + dst_size;
        /* Prepare the 16-byte IV: SALT (4) | IV (8) | 0x00000001 for
         * RFC4106, or IV (12) | 0x00000001 for plain GCM.
         */
3212        if (get_aead_subtype(tfm) ==
3213            CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3214                memcpy(ivptr, aeadctx->salt, 4);
3215                memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3216        } else {
3217                memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3218        }
3219        put_unaligned_be32(0x01, &ivptr[12]);
3220        ulptx = (struct ulptx_sgl *)(ivptr + 16);
3221
3222        chcr_add_aead_dst_ent(req, phys_cpl, qid);
3223        chcr_add_aead_src_ent(req, ulptx);
3224        atomic_inc(&adap->chcr_stats.aead_rqst);
3225        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3226                kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3227        create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3228                    transhdr_len, temp, reqctx->verify);
3229        reqctx->skb = skb;
3230        return skb;
3231
3232err:
3233        chcr_aead_common_exit(req);
3234        return ERR_PTR(error);
3235}
3236
3239static int chcr_aead_cra_init(struct crypto_aead *tfm)
3240{
3241        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3242        struct aead_alg *alg = crypto_aead_alg(tfm);
3243
3244        aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3245                                               CRYPTO_ALG_NEED_FALLBACK |
3246                                               CRYPTO_ALG_ASYNC);
3247        if  (IS_ERR(aeadctx->sw_cipher))
3248                return PTR_ERR(aeadctx->sw_cipher);
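        /* The request context must be able to hold either this
         * driver's own reqctx or a nested request for the software
         * fallback cipher, whichever is larger.
         */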
3249        crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3250                                 sizeof(struct aead_request) +
3251                                 crypto_aead_reqsize(aeadctx->sw_cipher)));
3252        return chcr_device_init(a_ctx(tfm));
3253}
3254
3255static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3256{
3257        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3258
3259        crypto_free_aead(aeadctx->sw_cipher);
3260}
3261
3262static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3263                                        unsigned int authsize)
3264{
3265        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3266
3267        aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3268        aeadctx->mayverify = VERIFY_HW;
3269        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3270}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
                                    unsigned int authsize)
3273{
3274        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275        u32 maxauth = crypto_aead_maxauthsize(tfm);
3276
        /* In IPsec, the SHA1 authsize is 12, not maxauthsize / 2 (10),
         * so the authsize == 12 check must come before the
         * authsize == (maxauth >> 1) check.
         */
3281        if (authsize == ICV_4) {
3282                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3283                aeadctx->mayverify = VERIFY_HW;
3284        } else if (authsize == ICV_6) {
3285                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3286                aeadctx->mayverify = VERIFY_HW;
3287        } else if (authsize == ICV_10) {
3288                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3289                aeadctx->mayverify = VERIFY_HW;
3290        } else if (authsize == ICV_12) {
3291                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3292                aeadctx->mayverify = VERIFY_HW;
3293        } else if (authsize == ICV_14) {
3294                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3295                aeadctx->mayverify = VERIFY_HW;
3296        } else if (authsize == (maxauth >> 1)) {
3297                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3298                aeadctx->mayverify = VERIFY_HW;
3299        } else if (authsize == maxauth) {
3300                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3301                aeadctx->mayverify = VERIFY_HW;
3302        } else {
3303                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3304                aeadctx->mayverify = VERIFY_SW;
3305        }
3306        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3307}
3308
3310static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3311{
3312        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3313
3314        switch (authsize) {
3315        case ICV_4:
3316                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3317                aeadctx->mayverify = VERIFY_HW;
3318                break;
3319        case ICV_8:
3320                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3321                aeadctx->mayverify = VERIFY_HW;
3322                break;
3323        case ICV_12:
3324                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3325                aeadctx->mayverify = VERIFY_HW;
3326                break;
3327        case ICV_14:
3328                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3329                aeadctx->mayverify = VERIFY_HW;
3330                break;
3331        case ICV_16:
3332                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3333                aeadctx->mayverify = VERIFY_HW;
3334                break;
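        /* Tag lengths the hardware cannot truncate to directly: have
         * it emit the full-length tag and verify/truncate in software.
         */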
3335        case ICV_13:
3336        case ICV_15:
3337                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3338                aeadctx->mayverify = VERIFY_SW;
3339                break;
3340        default:
3341                return -EINVAL;
3342        }
3343        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3344}
3345
3346static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3347                                          unsigned int authsize)
3348{
3349        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3350
3351        switch (authsize) {
3352        case ICV_8:
3353                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3354                aeadctx->mayverify = VERIFY_HW;
3355                break;
3356        case ICV_12:
3357                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3358                aeadctx->mayverify = VERIFY_HW;
3359                break;
3360        case ICV_16:
3361                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3362                aeadctx->mayverify = VERIFY_HW;
3363                break;
3364        default:
3365                return -EINVAL;
3366        }
3367        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3368}
3369
3370static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3371                                unsigned int authsize)
3372{
3373        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3374
3375        switch (authsize) {
3376        case ICV_4:
3377                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3378                aeadctx->mayverify = VERIFY_HW;
3379                break;
3380        case ICV_6:
3381                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3382                aeadctx->mayverify = VERIFY_HW;
3383                break;
3384        case ICV_8:
3385                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3386                aeadctx->mayverify = VERIFY_HW;
3387                break;
3388        case ICV_10:
3389                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3390                aeadctx->mayverify = VERIFY_HW;
3391                break;
3392        case ICV_12:
3393                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3394                aeadctx->mayverify = VERIFY_HW;
3395                break;
3396        case ICV_14:
3397                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3398                aeadctx->mayverify = VERIFY_HW;
3399                break;
3400        case ICV_16:
3401                aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3402                aeadctx->mayverify = VERIFY_HW;
3403                break;
3404        default:
3405                return -EINVAL;
3406        }
3407        return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3408}
3409
3410static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3411                                const u8 *key,
3412                                unsigned int keylen)
3413{
3414        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3415        unsigned char ck_size, mk_size;
3416        int key_ctx_size = 0;
3417
3418        key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3419        if (keylen == AES_KEYSIZE_128) {
3420                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3421                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3422        } else if (keylen == AES_KEYSIZE_192) {
3423                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3424                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3425        } else if (keylen == AES_KEYSIZE_256) {
3426                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3427                mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3428        } else {
3429                aeadctx->enckey_len = 0;
3430                return  -EINVAL;
3431        }
3432        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3433                                                key_ctx_size >> 4);
3434        memcpy(aeadctx->key, key, keylen);
3435        aeadctx->enckey_len = keylen;
3436
3437        return 0;
3438}
3439
3440static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3441                                const u8 *key,
3442                                unsigned int keylen)
3443{
3444        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3445        int error;
3446
3447        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3448        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3449                              CRYPTO_TFM_REQ_MASK);
3450        error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3451        if (error)
3452                return error;
3453        return chcr_ccm_common_setkey(aead, key, keylen);
3454}
3455
3456static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3457                                    unsigned int keylen)
3458{
3459        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3460        int error;
3461
3462        if (keylen < 3) {
3463                aeadctx->enckey_len = 0;
3464                return  -EINVAL;
3465        }
3466        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3467        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3468                              CRYPTO_TFM_REQ_MASK);
3469        error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3470        if (error)
3471                return error;
3472        keylen -= 3;
3473        memcpy(aeadctx->salt, key + keylen, 3);
3474        return chcr_ccm_common_setkey(aead, key, keylen);
3475}
3476
3477static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3478                           unsigned int keylen)
3479{
3480        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3481        struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3482        unsigned int ck_size;
3483        int ret = 0, key_ctx_size = 0;
3484        struct crypto_aes_ctx aes;
3485
3486        aeadctx->enckey_len = 0;
3487        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3488        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3489                              & CRYPTO_TFM_REQ_MASK);
3490        ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3491        if (ret)
3492                goto out;
3493
3494        if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3495            keylen > 3) {
3496                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3497                memcpy(aeadctx->salt, key + keylen, 4);
3498        }
3499        if (keylen == AES_KEYSIZE_128) {
3500                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3501        } else if (keylen == AES_KEYSIZE_192) {
3502                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3503        } else if (keylen == AES_KEYSIZE_256) {
3504                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3505        } else {
3506                pr_err("GCM: Invalid key length %d\n", keylen);
3507                ret = -EINVAL;
3508                goto out;
3509        }
3510
3511        memcpy(aeadctx->key, key, keylen);
3512        aeadctx->enckey_len = keylen;
3513        key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3514                AEAD_H_SIZE;
3515        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3516                                                CHCR_KEYCTX_MAC_KEY_SIZE_128,
3517                                                0, 0,
3518                                                key_ctx_size >> 4);
        /* Calculate the GHASH subkey H = CIPH(K, 0^128); it goes into
         * the key context.
         */
3522        ret = aes_expandkey(&aes, key, keylen);
3523        if (ret) {
3524                aeadctx->enckey_len = 0;
3525                goto out;
3526        }
3527        memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3528        aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3529        memzero_explicit(&aes, sizeof(aes));
3530
3531out:
3532        return ret;
3533}
3534
3535static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3536                                   unsigned int keylen)
3537{
3538        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3539        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
        /* keys holds both the authentication key and the cipher key */
3541        struct crypto_authenc_keys keys;
3542        unsigned int bs, subtype;
3543        unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3544        int err = 0, i, key_ctx_len = 0;
3545        unsigned char ck_size = 0;
3546        unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3547        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3548        struct algo_param param;
3549        int align;
3550        u8 *o_ptr = NULL;
3551
3552        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3553        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3554                              & CRYPTO_TFM_REQ_MASK);
3555        err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3556        if (err)
3557                goto out;
3558
3559        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3560                goto out;
3561
3562        if (get_alg_config(&param, max_authsize)) {
3563                pr_err("Unsupported digest size\n");
3564                goto out;
3565        }
3566        subtype = get_aead_subtype(authenc);
3567        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3568                subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3569                if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3570                        goto out;
3571                memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3572                - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3573                keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3574        }
3575        if (keys.enckeylen == AES_KEYSIZE_128) {
3576                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3577        } else if (keys.enckeylen == AES_KEYSIZE_192) {
3578                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3579        } else if (keys.enckeylen == AES_KEYSIZE_256) {
3580                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3581        } else {
3582                pr_err("Unsupported cipher key\n");
3583                goto out;
3584        }
3585
        /* Copy only the encryption key. The authentication key is used
         * once here to generate h(ipad) and h(opad), so it is not
         * needed again; authkeylen equals the hash digest size.
         */
3590        memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3591        aeadctx->enckey_len = keys.enckeylen;
        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
            subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
                get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
                                    aeadctx->enckey_len << 3);
        }
3598        base_hash  = chcr_alloc_shash(max_authsize);
3599        if (IS_ERR(base_hash)) {
3600                pr_err("Base driver cannot be loaded\n");
3601                goto out;
3602        }
3603        {
3604                SHASH_DESC_ON_STACK(shash, base_hash);
3605
3606                shash->tfm = base_hash;
3607                bs = crypto_shash_blocksize(base_hash);
3608                align = KEYCTX_ALIGN_PAD(max_authsize);
3609                o_ptr =  actx->h_iopad + param.result_size + align;
3610
3611                if (keys.authkeylen > bs) {
3612                        err = crypto_shash_digest(shash, keys.authkey,
3613                                                  keys.authkeylen,
3614                                                  o_ptr);
3615                        if (err) {
3616                                pr_err("Base driver cannot be loaded\n");
3617                                goto out;
3618                        }
3619                        keys.authkeylen = max_authsize;
                } else {
                        memcpy(o_ptr, keys.authkey, keys.authkeylen);
                }
3622
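                /* Standard HMAC precomputation (RFC 2104): hash one
                 * block of K XOR ipad and of K XOR opad, and keep the
                 * resulting partial hash states for the key context so
                 * the key need not be rehashed per request.
                 */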
3623                /* Compute the ipad-digest*/
3624                memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3625                memcpy(pad, o_ptr, keys.authkeylen);
3626                for (i = 0; i < bs >> 2; i++)
3627                        *((unsigned int *)pad + i) ^= IPAD_DATA;
3628
3629                if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3630                                              max_authsize))
3631                        goto out;
3632                /* Compute the opad-digest */
3633                memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3634                memcpy(pad, o_ptr, keys.authkeylen);
3635                for (i = 0; i < bs >> 2; i++)
3636                        *((unsigned int *)pad + i) ^= OPAD_DATA;
3637
3638                if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3639                        goto out;
3640
3641                /* convert the ipad and opad digest to network order */
3642                chcr_change_order(actx->h_iopad, param.result_size);
3643                chcr_change_order(o_ptr, param.result_size);
3644                key_ctx_len = sizeof(struct _key_ctx) +
3645                        roundup(keys.enckeylen, 16) +
3646                        (param.result_size + align) * 2;
3647                aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3648                                                0, 1, key_ctx_len >> 4);
3649                actx->auth_mode = param.auth_mode;
3650                chcr_free_shash(base_hash);
3651
3652                memzero_explicit(&keys, sizeof(keys));
3653                return 0;
3654        }
3655out:
3656        aeadctx->enckey_len = 0;
3657        memzero_explicit(&keys, sizeof(keys));
3658        if (!IS_ERR(base_hash))
3659                chcr_free_shash(base_hash);
3660        return -EINVAL;
3661}
3662
3663static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3664                                        const u8 *key, unsigned int keylen)
3665{
3666        struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3667        struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
        /* keys holds both the authentication key and the cipher key */
        struct crypto_authenc_keys keys;
        int err;
3671        unsigned int subtype;
3672        int key_ctx_len = 0;
3673        unsigned char ck_size = 0;
3674
3675        crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3676        crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3677                              & CRYPTO_TFM_REQ_MASK);
3678        err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3679        if (err)
3680                goto out;
3681
3682        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3683                goto out;
3684
3685        subtype = get_aead_subtype(authenc);
3686        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3687            subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3688                if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3689                        goto out;
3690                memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3691                        - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3692                keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3693        }
3694        if (keys.enckeylen == AES_KEYSIZE_128) {
3695                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3696        } else if (keys.enckeylen == AES_KEYSIZE_192) {
3697                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3698        } else if (keys.enckeylen == AES_KEYSIZE_256) {
3699                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3700        } else {
3701                pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3702                goto out;
3703        }
3704        memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3705        aeadctx->enckey_len = keys.enckeylen;
3706        if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3707            subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3708                get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3709                                aeadctx->enckey_len << 3);
3710        }
3711        key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3712
3713        aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3714                                                0, key_ctx_len >> 4);
3715        actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3716        memzero_explicit(&keys, sizeof(keys));
3717        return 0;
3718out:
3719        aeadctx->enckey_len = 0;
3720        memzero_explicit(&keys, sizeof(keys));
3721        return -EINVAL;
3722}
3723
3724static int chcr_aead_op(struct aead_request *req,
3725                        int size,
3726                        create_wr_t create_wr_fn)
3727{
3728        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3729        struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3730        struct chcr_context *ctx = a_ctx(tfm);
3731        struct uld_ctx *u_ctx = ULD_CTX(ctx);
3732        struct sk_buff *skb;
3733        struct chcr_dev *cdev;
3734
3735        cdev = a_ctx(tfm)->dev;
3736        if (!cdev) {
3737                pr_err("%s : No crypto device.\n", __func__);
3738                return -ENXIO;
3739        }
3740
        if (chcr_inc_wrcount(cdev)) {
                /* Detached state for CHCR means lldi or padap has been
                 * freed; the fallback counter cannot be incremented
                 * here.
                 */
                return chcr_aead_fallback(req, reqctx->op);
        }
3747
        if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], reqctx->txqidx) &&
            !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                chcr_dec_wrcount(cdev);
                return -ENOSPC;
        }
3754
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
            crypto_ipsec_check_assoclen(req->assoclen) != 0) {
                pr_err("RFC4106: Invalid value of assoclen %d\n",
                       req->assoclen);
                /* Balance the chcr_inc_wrcount() taken above */
                chcr_dec_wrcount(cdev);
                return -EINVAL;
        }
3761
3762        /* Form a WR from req */
3763        skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3764
3765        if (IS_ERR_OR_NULL(skb)) {
3766                chcr_dec_wrcount(cdev);
3767                return PTR_ERR_OR_ZERO(skb);
3768        }
3769
3770        skb->dev = u_ctx->lldi.ports[0];
3771        set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3772        chcr_send_wr(skb);
3773        return -EINPROGRESS;
3774}
3775
3776static int chcr_aead_encrypt(struct aead_request *req)
3777{
3778        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3779        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3780        struct chcr_context *ctx = a_ctx(tfm);
3781        unsigned int cpu;
3782
3783        cpu = get_cpu();
3784        reqctx->txqidx = cpu % ctx->ntxq;
3785        reqctx->rxqidx = cpu % ctx->nrxq;
3786        put_cpu();
3787
3788        reqctx->verify = VERIFY_HW;
3789        reqctx->op = CHCR_ENCRYPT_OP;
3790
3791        switch (get_aead_subtype(tfm)) {
3792        case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3793        case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3794        case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3795        case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3796                return chcr_aead_op(req, 0, create_authenc_wr);
3797        case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3798        case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3799                return chcr_aead_op(req, 0, create_aead_ccm_wr);
3800        default:
3801                return chcr_aead_op(req, 0, create_gcm_wr);
3802        }
3803}
3804
3805static int chcr_aead_decrypt(struct aead_request *req)
3806{
3807        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3808        struct chcr_context *ctx = a_ctx(tfm);
3809        struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3810        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3811        int size;
3812        unsigned int cpu;
3813
3814        cpu = get_cpu();
3815        reqctx->txqidx = cpu % ctx->ntxq;
3816        reqctx->rxqidx = cpu % ctx->nrxq;
3817        put_cpu();
3818
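        /* When the configured tag length forces software verification,
         * request the full-size tag from the hardware so it can be
         * checked afterwards.
         */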
3819        if (aeadctx->mayverify == VERIFY_SW) {
3820                size = crypto_aead_maxauthsize(tfm);
3821                reqctx->verify = VERIFY_SW;
3822        } else {
3823                size = 0;
3824                reqctx->verify = VERIFY_HW;
3825        }
3826        reqctx->op = CHCR_DECRYPT_OP;
3827        switch (get_aead_subtype(tfm)) {
3828        case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3829        case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3830        case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3831        case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3832                return chcr_aead_op(req, size, create_authenc_wr);
3833        case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3834        case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3835                return chcr_aead_op(req, size, create_aead_ccm_wr);
3836        default:
3837                return chcr_aead_op(req, size, create_gcm_wr);
3838        }
3839}
3840
3841static struct chcr_alg_template driver_algs[] = {
3842        /* AES-CBC */
3843        {
3844                .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3845                .is_registered = 0,
3846                .alg.skcipher = {
3847                        .base.cra_name          = "cbc(aes)",
3848                        .base.cra_driver_name   = "cbc-aes-chcr",
3849                        .base.cra_blocksize     = AES_BLOCK_SIZE,
3850
3851                        .init                   = chcr_init_tfm,
3852                        .exit                   = chcr_exit_tfm,
3853                        .min_keysize            = AES_MIN_KEY_SIZE,
3854                        .max_keysize            = AES_MAX_KEY_SIZE,
3855                        .ivsize                 = AES_BLOCK_SIZE,
3856                        .setkey                 = chcr_aes_cbc_setkey,
3857                        .encrypt                = chcr_aes_encrypt,
3858                        .decrypt                = chcr_aes_decrypt,
3859                        }
3860        },
3861        {
3862                .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3863                .is_registered = 0,
3864                .alg.skcipher = {
3865                        .base.cra_name          = "xts(aes)",
3866                        .base.cra_driver_name   = "xts-aes-chcr",
3867                        .base.cra_blocksize     = AES_BLOCK_SIZE,
3868
3869                        .init                   = chcr_init_tfm,
3870                        .exit                   = chcr_exit_tfm,
3871                        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
3872                        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
3873                        .ivsize                 = AES_BLOCK_SIZE,
3874                        .setkey                 = chcr_aes_xts_setkey,
3875                        .encrypt                = chcr_aes_encrypt,
3876                        .decrypt                = chcr_aes_decrypt,
3877                        }
3878        },
3879        {
3880                .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3881                .is_registered = 0,
3882                .alg.skcipher = {
3883                        .base.cra_name          = "ctr(aes)",
3884                        .base.cra_driver_name   = "ctr-aes-chcr",
3885                        .base.cra_blocksize     = 1,
3886
3887                        .init                   = chcr_init_tfm,
3888                        .exit                   = chcr_exit_tfm,
3889                        .min_keysize            = AES_MIN_KEY_SIZE,
3890                        .max_keysize            = AES_MAX_KEY_SIZE,
3891                        .ivsize                 = AES_BLOCK_SIZE,
3892                        .setkey                 = chcr_aes_ctr_setkey,
3893                        .encrypt                = chcr_aes_encrypt,
3894                        .decrypt                = chcr_aes_decrypt,
3895                }
3896        },
3897        {
3898                .type = CRYPTO_ALG_TYPE_SKCIPHER |
3899                        CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3900                .is_registered = 0,
3901                .alg.skcipher = {
3902                        .base.cra_name          = "rfc3686(ctr(aes))",
3903                        .base.cra_driver_name   = "rfc3686-ctr-aes-chcr",
3904                        .base.cra_blocksize     = 1,
3905
3906                        .init                   = chcr_rfc3686_init,
3907                        .exit                   = chcr_exit_tfm,
3908                        .min_keysize            = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3909                        .max_keysize            = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3910                        .ivsize                 = CTR_RFC3686_IV_SIZE,
3911                        .setkey                 = chcr_aes_rfc3686_setkey,
3912                        .encrypt                = chcr_aes_encrypt,
3913                        .decrypt                = chcr_aes_decrypt,
3914                }
3915        },
3916        /* SHA */
3917        {
3918                .type = CRYPTO_ALG_TYPE_AHASH,
3919                .is_registered = 0,
3920                .alg.hash = {
3921                        .halg.digestsize = SHA1_DIGEST_SIZE,
3922                        .halg.base = {
3923                                .cra_name = "sha1",
                                .cra_driver_name = "sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha384",
                                .cra_driver_name = "sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AHASH,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "sha512",
                                .cra_driver_name = "sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* HMAC */
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "hmac-sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha224)",
                                .cra_driver_name = "hmac-sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "hmac-sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha384)",
                                .cra_driver_name = "hmac-sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_HMAC,
                .is_registered = 0,
                .alg.hash = {
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha512)",
                                .cra_driver_name = "hmac-sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
        /* AEAD algorithms */
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_AES_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_gcm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4106(gcm(aes))",
                                .cra_driver_name = "rfc4106-gcm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_gcm_ctx),
                        },
                        .ivsize = GCM_RFC4106_IV_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_gcm_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "ccm(aes)",
                                .cra_driver_name = "ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_ccm_setkey,
                        .setauthsize = chcr_ccm_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "rfc4309(ccm(aes))",
                                .cra_driver_name = "rfc4309-ccm-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY + 1,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx),
                        },
                        .ivsize = 8,
                        .maxauthsize = GHASH_DIGEST_SIZE,
                        .setkey = chcr_aead_rfc4309_setkey,
                        .setauthsize = chcr_4106_4309_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,cbc(aes))",
                                .cra_driver_name =
                                        "authenc-digest_null-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
                                .cra_driver_name =
                                        "authenc-digest_null-rfc3686-ctr-aes-chcr",
                                .cra_blocksize = 1,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
};
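
/*
 * Consumers never call into this table directly: once registered, the
 * algorithms above are looked up by cra_name through the regular kernel
 * crypto API, and the cra_priority values let chcr win over generic
 * software implementations. A minimal, illustrative consumer sketch
 * (not part of this driver):
 *
 *      struct crypto_ahash *hash = crypto_alloc_ahash("sha256", 0, 0);
 *      struct crypto_aead *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 * Both calls resolve to "sha256-chcr" / "gcm-aes-chcr" whenever this
 * driver is loaded and holds the highest priority for those names.
 */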

/*
 *      chcr_unregister_alg - Deregister crypto algorithms from the
 *      kernel framework. An algorithm is torn down only when its
 *      registration reference is the last one left (cra_refcnt == 1);
 *      entries still in use elsewhere stay registered.
 */
static int chcr_unregister_alg(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        if (driver_algs[i].is_registered && refcount_read(
                            &driver_algs[i].alg.skcipher.base.cra_refcnt)
                            == 1) {
                                crypto_unregister_skcipher(
                                                &driver_algs[i].alg.skcipher);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        if (driver_algs[i].is_registered && refcount_read(
                            &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
                                crypto_unregister_aead(
                                                &driver_algs[i].alg.aead);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        if (driver_algs[i].is_registered && refcount_read(
                            &driver_algs[i].alg.hash.halg.base.cra_refcnt)
                            == 1) {
                                crypto_unregister_ahash(
                                                &driver_algs[i].alg.hash);
                                driver_algs[i].is_registered = 0;
                        }
                        break;
                }
        }
        return 0;
}
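/*
 * Per-tfm context sizes handed to the crypto core below: a plain hash
 * carries only the common chcr_context, an HMAC additionally needs a
 * struct hmac_ctx for the precomputed key pads, and the ahash request
 * context size also serves as the export/import statesize.
 */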
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 *      chcr_register_alg - Register crypto algorithms with the kernel
 *      framework. On any failure, everything registered so far is
 *      unwound via chcr_unregister_alg() before the error is returned.
 */
static int chcr_register_alg(void)
{
        struct crypto_alg ai;
        struct ahash_alg *a_hash;
        int err = 0, i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (driver_algs[i].is_registered)
                        continue;
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        driver_algs[i].alg.skcipher.base.cra_priority =
                                CHCR_CRA_PRIORITY;
                        driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
                        driver_algs[i].alg.skcipher.base.cra_flags =
                                CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_ALLOCATES_MEMORY |
                                CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.skcipher.base.cra_ctxsize =
                                sizeof(struct chcr_context) +
                                sizeof(struct ablk_ctx);
                        driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

                        err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
                        name = driver_algs[i].alg.skcipher.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        driver_algs[i].alg.aead.base.cra_flags =
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
                                CRYPTO_ALG_ALLOCATES_MEMORY;
                        driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
                        driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
                        driver_algs[i].alg.aead.init = chcr_aead_cra_init;
                        driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
                        driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
                        err = crypto_register_aead(&driver_algs[i].alg.aead);
                        name = driver_algs[i].alg.aead.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        a_hash = &driver_algs[i].alg.hash;
                        a_hash->update = chcr_ahash_update;
                        a_hash->final = chcr_ahash_final;
                        a_hash->finup = chcr_ahash_finup;
                        a_hash->digest = chcr_ahash_digest;
                        a_hash->export = chcr_ahash_export;
                        a_hash->import = chcr_ahash_import;
                        a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
                        a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
                        a_hash->halg.base.cra_module = THIS_MODULE;
                        a_hash->halg.base.cra_flags =
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
                        a_hash->halg.base.cra_alignmask = 0;
                        a_hash->halg.base.cra_exit = NULL;

                        /*
                         * HMAC entries additionally need setkey and the
                         * larger context that holds the key pads.
                         */
                        if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
                                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
                                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
                                a_hash->init = chcr_hmac_init;
                                a_hash->setkey = chcr_ahash_setkey;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
                        } else {
                                a_hash->init = chcr_sha_init;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
                                a_hash->halg.base.cra_init = chcr_sha_cra_init;
                        }
                        err = crypto_register_ahash(&driver_algs[i].alg.hash);
                        /* keep the driver name for the error path below */
                        ai = driver_algs[i].alg.hash.halg.base;
                        name = ai.cra_driver_name;
                        break;
                }
                if (err) {
                        pr_err("%s : Algorithm registration failed\n", name);
                        goto register_err;
                } else {
                        driver_algs[i].is_registered = 1;
                }
        }
        return 0;

register_err:
        chcr_unregister_alg();
        return err;
}
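
/*
 * Ciphers and AEADs are registered with CRYPTO_ALG_NEED_FALLBACK:
 * requests the hardware cannot take are punted to a software fallback
 * allocated in the tfm init paths earlier in this file. Illustrative
 * sketch of such an allocation (field name illustrative):
 *
 *      ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
 *                                             CRYPTO_ALG_NEED_FALLBACK);
 *
 * Passing CRYPTO_ALG_NEED_FALLBACK in the mask prevents the core from
 * returning another implementation that itself needs a fallback.
 */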

/*
 *      start_crypto - Register the crypto algorithms.
 *      This should be called once, when the first device comes up;
 *      after that the kernel will start calling the driver APIs for
 *      crypto operations.
 */
int start_crypto(void)
{
        return chcr_register_alg();
}

/*
 *      stop_crypto - Deregister all the crypto algorithms with the kernel.
 *      This should be called once, when the last device goes down;
 *      after that the kernel will not call the driver APIs for crypto
 *      operations.
 */
int stop_crypto(void)
{
        chcr_unregister_alg();
        return 0;
}
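
/*
 * Expected pairing, driven by device arrival/removal in the core driver
 * (chcr_core.c). A simplified, illustrative sketch of the core-side
 * bookkeeping, not the actual implementation:
 *
 *      if (atomic_inc_return(&dev_count) == 1)
 *              err = start_crypto();
 *      ...
 *      if (atomic_dec_and_test(&dev_count))
 *              stop_crypto();
 */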