linux/drivers/crypto/caam/caamalg_qi2.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * Copyright 2015-2016 Freescale Semiconductor Inc.
   4 * Copyright 2017-2019 NXP
   5 */
   6
   7#include "compat.h"
   8#include "regs.h"
   9#include "caamalg_qi2.h"
  10#include "dpseci_cmd.h"
  11#include "desc_constr.h"
  12#include "error.h"
  13#include "sg_sw_sec4.h"
  14#include "sg_sw_qm2.h"
  15#include "key_gen.h"
  16#include "caamalg_desc.h"
  17#include "caamhash_desc.h"
  18#include "dpseci-debugfs.h"
  19#include <linux/fsl/mc.h>
  20#include <soc/fsl/dpaa2-io.h>
  21#include <soc/fsl/dpaa2-fd.h>
  22
  23#define CAAM_CRA_PRIORITY       2000
  24
   25/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
  26#define CAAM_MAX_KEY_SIZE       (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
  27                                 SHA512_DIGEST_SIZE * 2)
  28
  29/*
   30 * This is a cache of buffers, from which the users of the CAAM QI driver
  31 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
  32 * NOTE: A more elegant solution would be to have some headroom in the frames
  33 *       being processed. This can be added by the dpaa2-eth driver. This would
  34 *       pose a problem for userspace application processing which cannot
  35 *       know of this limitation. So for now, this will work.
   36 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
  37 */
  38static struct kmem_cache *qi_cache;
  39
  40struct caam_alg_entry {
  41        struct device *dev;
  42        int class1_alg_type;
  43        int class2_alg_type;
  44        bool rfc3686;
  45        bool geniv;
  46        bool nodkp;
  47};
  48
  49struct caam_aead_alg {
  50        struct aead_alg aead;
  51        struct caam_alg_entry caam;
  52        bool registered;
  53};
  54
  55struct caam_skcipher_alg {
  56        struct skcipher_alg skcipher;
  57        struct caam_alg_entry caam;
  58        bool registered;
  59};
  60
  61/**
   62 * struct caam_ctx - per-session context
   63 * @flc: Flow Contexts array
   64 * @key: [authentication key], encryption key
  65 * @flc_dma: I/O virtual addresses of the Flow Contexts
  66 * @key_dma: I/O virtual address of the key
  67 * @dir: DMA direction for mapping key and Flow Contexts
  68 * @dev: dpseci device
  69 * @adata: authentication algorithm details
  70 * @cdata: encryption algorithm details
  71 * @authsize: authentication tag (a.k.a. ICV / MAC) size
  72 */
  73struct caam_ctx {
  74        struct caam_flc flc[NUM_OP];
  75        u8 key[CAAM_MAX_KEY_SIZE];
  76        dma_addr_t flc_dma[NUM_OP];
  77        dma_addr_t key_dma;
  78        enum dma_data_direction dir;
  79        struct device *dev;
  80        struct alginfo adata;
  81        struct alginfo cdata;
  82        unsigned int authsize;
  83};
  84
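/*
 * dpaa2_caam_iova_to_virt - translate an I/O virtual address to a CPU
 * virtual address. When the DPSECI device sits behind an IOMMU, the IOVA
 * is first walked back to a physical address through the attached domain;
 * otherwise IOVA and physical address are identical.
 */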
  85static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
  86                                     dma_addr_t iova_addr)
  87{
  88        phys_addr_t phys_addr;
  89
  90        phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
  91                                   iova_addr;
  92
  93        return phys_to_virt(phys_addr);
  94}
  95
  96/*
  97 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
  98 *
  99 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 100 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 101 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 102 * hosting 16 SG entries.
 103 *
 104 * @flags - flags that would be used for the equivalent kmalloc(..) call
 105 *
 106 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 107 */
 108static inline void *qi_cache_zalloc(gfp_t flags)
 109{
 110        return kmem_cache_zalloc(qi_cache, flags);
 111}
 112
 113/*
 114 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 115 *
 116 * @obj - buffer previously allocated by qi_cache_zalloc
 117 *
  118 * No checking is done; the call is a passthrough to
  119 * kmem_cache_free(...)
 120 */
 121static inline void qi_cache_free(void *obj)
 122{
 123        kmem_cache_free(qi_cache, obj);
 124}
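
/*
 * Typical hotpath pairing, as done e.g. in aead_edesc_alloc() below:
 *
 *	edesc = qi_cache_zalloc(GFP_DMA | flags);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */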
 125
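/*
 * to_caam_req - return the struct caam_request embedded at the start of an
 * algorithm request context. All three request types reserve room for it
 * (their reqsize is set at tfm init time), so the context pointer can be
 * returned directly.
 */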
 126static struct caam_request *to_caam_req(struct crypto_async_request *areq)
 127{
 128        switch (crypto_tfm_alg_type(areq->tfm)) {
 129        case CRYPTO_ALG_TYPE_SKCIPHER:
 130                return skcipher_request_ctx(skcipher_request_cast(areq));
 131        case CRYPTO_ALG_TYPE_AEAD:
 132                return aead_request_ctx(container_of(areq, struct aead_request,
 133                                                     base));
 134        case CRYPTO_ALG_TYPE_AHASH:
 135                return ahash_request_ctx(ahash_request_cast(areq));
 136        default:
 137                return ERR_PTR(-EINVAL);
 138        }
 139}
 140
 141static void caam_unmap(struct device *dev, struct scatterlist *src,
 142                       struct scatterlist *dst, int src_nents,
 143                       int dst_nents, dma_addr_t iv_dma, int ivsize,
 144                       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
 145                       int qm_sg_bytes)
 146{
 147        if (dst != src) {
 148                if (src_nents)
 149                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 150                if (dst_nents)
 151                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 152        } else {
 153                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 154        }
 155
 156        if (iv_dma)
 157                dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 158
 159        if (qm_sg_bytes)
 160                dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
 161}
 162
 163static int aead_set_sh_desc(struct crypto_aead *aead)
 164{
 165        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 166                                                 typeof(*alg), aead);
 167        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 168        unsigned int ivsize = crypto_aead_ivsize(aead);
 169        struct device *dev = ctx->dev;
 170        struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
 171        struct caam_flc *flc;
 172        u32 *desc;
 173        u32 ctx1_iv_off = 0;
 174        u32 *nonce = NULL;
 175        unsigned int data_len[2];
 176        u32 inl_mask;
 177        const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 178                               OP_ALG_AAI_CTR_MOD128);
 179        const bool is_rfc3686 = alg->caam.rfc3686;
 180
 181        if (!ctx->cdata.keylen || !ctx->authsize)
 182                return 0;
 183
 184        /*
 185         * AES-CTR needs to load IV in CONTEXT1 reg
 186         * at an offset of 128bits (16bytes)
 187         * CONTEXT1[255:128] = IV
 188         */
 189        if (ctr_mode)
 190                ctx1_iv_off = 16;
 191
 192        /*
 193         * RFC3686 specific:
 194         *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
 195         */
 196        if (is_rfc3686) {
 197                ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
 198                nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
 199                                ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
 200        }
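        /*
         * With the 4-byte RFC3686 nonce this places the IV at byte offset
         * 20 of CONTEXT1 (16 + CTR_RFC3686_NONCE_SIZE), leaving the upper
         * word for the 32-bit block counter.
         */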
 201
 202        /*
 203         * In case |user key| > |derived key|, using DKP<imm,imm> would result
 204         * in invalid opcodes (last bytes of user key) in the resulting
 205         * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
 206         * addresses are needed.
 207         */
 208        ctx->adata.key_virt = ctx->key;
 209        ctx->adata.key_dma = ctx->key_dma;
 210
 211        ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
 212        ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
 213
 214        data_len[0] = ctx->adata.keylen_pad;
 215        data_len[1] = ctx->cdata.keylen;
 216
 217        /* aead_encrypt shared descriptor */
 218        if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
 219                                                 DESC_QI_AEAD_ENC_LEN) +
 220                              (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 221                              DESC_JOB_IO_LEN, data_len, &inl_mask,
 222                              ARRAY_SIZE(data_len)) < 0)
 223                return -EINVAL;
 224
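        /*
         * desc_inline_query() sets bit i of inl_mask when data_len[i] fits
         * inline in the shared descriptor: bit 0 covers the (split)
         * authentication key, bit 1 the encryption key.
         */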
 225        ctx->adata.key_inline = !!(inl_mask & 1);
 226        ctx->cdata.key_inline = !!(inl_mask & 2);
 227
 228        flc = &ctx->flc[ENCRYPT];
 229        desc = flc->sh_desc;
 230
 231        if (alg->caam.geniv)
 232                cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
 233                                          ivsize, ctx->authsize, is_rfc3686,
 234                                          nonce, ctx1_iv_off, true,
 235                                          priv->sec_attr.era);
 236        else
 237                cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
 238                                       ivsize, ctx->authsize, is_rfc3686, nonce,
 239                                       ctx1_iv_off, true, priv->sec_attr.era);
 240
 241        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 242        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 243                                   sizeof(flc->flc) + desc_bytes(desc),
 244                                   ctx->dir);
 245
 246        /* aead_decrypt shared descriptor */
 247        if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
 248                              (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 249                              DESC_JOB_IO_LEN, data_len, &inl_mask,
 250                              ARRAY_SIZE(data_len)) < 0)
 251                return -EINVAL;
 252
 253        ctx->adata.key_inline = !!(inl_mask & 1);
 254        ctx->cdata.key_inline = !!(inl_mask & 2);
 255
 256        flc = &ctx->flc[DECRYPT];
 257        desc = flc->sh_desc;
 258        cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
 259                               ivsize, ctx->authsize, alg->caam.geniv,
 260                               is_rfc3686, nonce, ctx1_iv_off, true,
 261                               priv->sec_attr.era);
 262        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 263        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 264                                   sizeof(flc->flc) + desc_bytes(desc),
 265                                   ctx->dir);
 266
 267        return 0;
 268}
 269
 270static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 271{
 272        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 273
 274        ctx->authsize = authsize;
 275        aead_set_sh_desc(authenc);
 276
 277        return 0;
 278}
 279
 280static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 281                       unsigned int keylen)
 282{
 283        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 284        struct device *dev = ctx->dev;
 285        struct crypto_authenc_keys keys;
 286
 287        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 288                goto badkey;
 289
 290        dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
 291                keys.authkeylen + keys.enckeylen, keys.enckeylen,
 292                keys.authkeylen);
 293        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 294                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 295
 296        ctx->adata.keylen = keys.authkeylen;
 297        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
 298                                              OP_ALG_ALGSEL_MASK);
 299
 300        if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
 301                goto badkey;
 302
 303        memcpy(ctx->key, keys.authkey, keys.authkeylen);
 304        memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 305        dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
 306                                   keys.enckeylen, ctx->dir);
 307        print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
 308                             DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 309                             ctx->adata.keylen_pad + keys.enckeylen, 1);
 310
 311        ctx->cdata.keylen = keys.enckeylen;
 312
 313        memzero_explicit(&keys, sizeof(keys));
 314        return aead_set_sh_desc(aead);
 315badkey:
 316        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 317        memzero_explicit(&keys, sizeof(keys));
 318        return -EINVAL;
 319}
 320
 321static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 322                            unsigned int keylen)
 323{
 324        struct crypto_authenc_keys keys;
 325        int err;
 326
 327        err = crypto_authenc_extractkeys(&keys, key, keylen);
 328        if (unlikely(err))
 329                goto badkey;
 330
 331        err = -EINVAL;
 332        if (keys.enckeylen != DES3_EDE_KEY_SIZE)
 333                goto badkey;
 334
 335        err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
 336              aead_setkey(aead, key, keylen);
 337
 338out:
 339        memzero_explicit(&keys, sizeof(keys));
 340        return err;
 341
 342badkey:
 343        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 344        goto out;
 345}
 346
 347static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 348                                           bool encrypt)
 349{
 350        struct crypto_aead *aead = crypto_aead_reqtfm(req);
 351        struct caam_request *req_ctx = aead_request_ctx(req);
 352        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
 353        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
 354        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 355        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 356                                                 typeof(*alg), aead);
 357        struct device *dev = ctx->dev;
 358        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 359                      GFP_KERNEL : GFP_ATOMIC;
 360        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 361        int src_len, dst_len = 0;
 362        struct aead_edesc *edesc;
 363        dma_addr_t qm_sg_dma, iv_dma = 0;
 364        int ivsize = 0;
 365        unsigned int authsize = ctx->authsize;
 366        int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
 367        int in_len, out_len;
 368        struct dpaa2_sg_entry *sg_table;
 369
 370        /* allocate space for base edesc, link tables and IV */
 371        edesc = qi_cache_zalloc(GFP_DMA | flags);
 372        if (unlikely(!edesc)) {
 373                dev_err(dev, "could not allocate extended descriptor\n");
 374                return ERR_PTR(-ENOMEM);
 375        }
 376
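        /*
         * Output and input lengths differ by the size of the ICV:
         * encryption appends authsize tag bytes, decryption consumes them.
         */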
 377        if (unlikely(req->dst != req->src)) {
 378                src_len = req->assoclen + req->cryptlen;
 379                dst_len = src_len + (encrypt ? authsize : (-authsize));
 380
 381                src_nents = sg_nents_for_len(req->src, src_len);
 382                if (unlikely(src_nents < 0)) {
 383                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
 384                                src_len);
 385                        qi_cache_free(edesc);
 386                        return ERR_PTR(src_nents);
 387                }
 388
 389                dst_nents = sg_nents_for_len(req->dst, dst_len);
 390                if (unlikely(dst_nents < 0)) {
 391                        dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
 392                                dst_len);
 393                        qi_cache_free(edesc);
 394                        return ERR_PTR(dst_nents);
 395                }
 396
 397                if (src_nents) {
 398                        mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
 399                                                      DMA_TO_DEVICE);
 400                        if (unlikely(!mapped_src_nents)) {
 401                                dev_err(dev, "unable to map source\n");
 402                                qi_cache_free(edesc);
 403                                return ERR_PTR(-ENOMEM);
 404                        }
 405                } else {
 406                        mapped_src_nents = 0;
 407                }
 408
 409                if (dst_nents) {
 410                        mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
 411                                                      DMA_FROM_DEVICE);
 412                        if (unlikely(!mapped_dst_nents)) {
 413                                dev_err(dev, "unable to map destination\n");
 414                                dma_unmap_sg(dev, req->src, src_nents,
 415                                             DMA_TO_DEVICE);
 416                                qi_cache_free(edesc);
 417                                return ERR_PTR(-ENOMEM);
 418                        }
 419                } else {
 420                        mapped_dst_nents = 0;
 421                }
 422        } else {
 423                src_len = req->assoclen + req->cryptlen +
 424                          (encrypt ? authsize : 0);
 425
 426                src_nents = sg_nents_for_len(req->src, src_len);
 427                if (unlikely(src_nents < 0)) {
 428                        dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
 429                                src_len);
 430                        qi_cache_free(edesc);
 431                        return ERR_PTR(src_nents);
 432                }
 433
 434                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
 435                                              DMA_BIDIRECTIONAL);
 436                if (unlikely(!mapped_src_nents)) {
 437                        dev_err(dev, "unable to map source\n");
 438                        qi_cache_free(edesc);
 439                        return ERR_PTR(-ENOMEM);
 440                }
 441        }
 442
 443        if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
 444                ivsize = crypto_aead_ivsize(aead);
 445
 446        /*
 447         * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
 448         * Input is not contiguous.
 449         * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
 450         * the end of the table by allocating more S/G entries. Logic:
 451         * if (src != dst && output S/G)
 452         *      pad output S/G, if needed
 453         * else if (src == dst && S/G)
 454         *      overlapping S/Gs; pad one of them
 455         * else if (input S/G) ...
 456         *      pad input S/G, if needed
 457         */
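        /* pad_sg_nents() rounds up to the next multiple of 4, e.g. 6 -> 8. */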
 458        qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
 459        if (mapped_dst_nents > 1)
 460                qm_sg_nents += pad_sg_nents(mapped_dst_nents);
 461        else if ((req->src == req->dst) && (mapped_src_nents > 1))
 462                qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
 463                                  1 + !!ivsize +
 464                                  pad_sg_nents(mapped_src_nents));
 465        else
 466                qm_sg_nents = pad_sg_nents(qm_sg_nents);
 467
 468        sg_table = &edesc->sgt[0];
 469        qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
 470        if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
 471                     CAAM_QI_MEMCACHE_SIZE)) {
 472                dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 473                        qm_sg_nents, ivsize);
 474                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
 475                           0, DMA_NONE, 0, 0);
 476                qi_cache_free(edesc);
 477                return ERR_PTR(-ENOMEM);
 478        }
 479
 480        if (ivsize) {
 481                u8 *iv = (u8 *)(sg_table + qm_sg_nents);
 482
 483                /* Make sure IV is located in a DMAable area */
 484                memcpy(iv, req->iv, ivsize);
 485
 486                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 487                if (dma_mapping_error(dev, iv_dma)) {
 488                        dev_err(dev, "unable to map IV\n");
 489                        caam_unmap(dev, req->src, req->dst, src_nents,
 490                                   dst_nents, 0, 0, DMA_NONE, 0, 0);
 491                        qi_cache_free(edesc);
 492                        return ERR_PTR(-ENOMEM);
 493                }
 494        }
 495
 496        edesc->src_nents = src_nents;
 497        edesc->dst_nents = dst_nents;
 498        edesc->iv_dma = iv_dma;
 499
 500        if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
 501            OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
 502                /*
 503                 * The associated data comes already with the IV but we need
 504                 * to skip it when we authenticate or encrypt...
 505                 */
 506                edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
 507        else
 508                edesc->assoclen = cpu_to_caam32(req->assoclen);
 509        edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
 510                                             DMA_TO_DEVICE);
 511        if (dma_mapping_error(dev, edesc->assoclen_dma)) {
 512                dev_err(dev, "unable to map assoclen\n");
 513                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
 514                           iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 515                qi_cache_free(edesc);
 516                return ERR_PTR(-ENOMEM);
 517        }
 518
 519        dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
 520        qm_sg_index++;
 521        if (ivsize) {
 522                dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 523                qm_sg_index++;
 524        }
 525        sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 526        qm_sg_index += mapped_src_nents;
 527
 528        if (mapped_dst_nents > 1)
 529                sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 530
 531        qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 532        if (dma_mapping_error(dev, qm_sg_dma)) {
 533                dev_err(dev, "unable to map S/G table\n");
 534                dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 535                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
 536                           iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 537                qi_cache_free(edesc);
 538                return ERR_PTR(-ENOMEM);
 539        }
 540
 541        edesc->qm_sg_dma = qm_sg_dma;
 542        edesc->qm_sg_bytes = qm_sg_bytes;
 543
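        /*
         * Total input length: the 4-byte assoclen word (first S/G entry),
         * the optional IV, then the assoclen + cryptlen payload.
         */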
 544        out_len = req->assoclen + req->cryptlen +
 545                  (encrypt ? ctx->authsize : (-ctx->authsize));
 546        in_len = 4 + ivsize + req->assoclen + req->cryptlen;
 547
 548        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 549        dpaa2_fl_set_final(in_fle, true);
 550        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 551        dpaa2_fl_set_addr(in_fle, qm_sg_dma);
 552        dpaa2_fl_set_len(in_fle, in_len);
 553
 554        if (req->dst == req->src) {
 555                if (mapped_src_nents == 1) {
 556                        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 557                        dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
 558                } else {
 559                        dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
 560                        dpaa2_fl_set_addr(out_fle, qm_sg_dma +
 561                                          (1 + !!ivsize) * sizeof(*sg_table));
 562                }
 563        } else if (!mapped_dst_nents) {
 564                /*
 565                 * crypto engine requires the output entry to be present when
 566                 * "frame list" FD is used.
 567                 * Since engine does not support FMT=2'b11 (unused entry type),
 568                 * leaving out_fle zeroized is the best option.
 569                 */
 570                goto skip_out_fle;
 571        } else if (mapped_dst_nents == 1) {
 572                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 573                dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
 574        } else {
 575                dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
 576                dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
 577                                  sizeof(*sg_table));
 578        }
 579
 580        dpaa2_fl_set_len(out_fle, out_len);
 581
 582skip_out_fle:
 583        return edesc;
 584}
 585
 586static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 587{
 588        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 589        unsigned int ivsize = crypto_aead_ivsize(aead);
 590        struct device *dev = ctx->dev;
 591        struct caam_flc *flc;
 592        u32 *desc;
 593
 594        if (!ctx->cdata.keylen || !ctx->authsize)
 595                return 0;
 596
 597        flc = &ctx->flc[ENCRYPT];
 598        desc = flc->sh_desc;
 599        cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 600                               ctx->authsize, true, true);
 601        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 602        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 603                                   sizeof(flc->flc) + desc_bytes(desc),
 604                                   ctx->dir);
 605
 606        flc = &ctx->flc[DECRYPT];
 607        desc = flc->sh_desc;
 608        cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 609                               ctx->authsize, false, true);
 610        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 611        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 612                                   sizeof(flc->flc) + desc_bytes(desc),
 613                                   ctx->dir);
 614
 615        return 0;
 616}
 617
 618static int chachapoly_setauthsize(struct crypto_aead *aead,
 619                                  unsigned int authsize)
 620{
 621        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 622
 623        if (authsize != POLY1305_DIGEST_SIZE)
 624                return -EINVAL;
 625
 626        ctx->authsize = authsize;
 627        return chachapoly_set_sh_desc(aead);
 628}
 629
 630static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 631                             unsigned int keylen)
 632{
 633        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 634        unsigned int ivsize = crypto_aead_ivsize(aead);
 635        unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 636
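        /*
         * rfc7539(chacha20,poly1305) uses the full 12-byte IV, so there is
         * no salt; rfc7539esp has an 8-byte IV and takes the remaining
         * 4 bytes of the 96-bit nonce from the end of the key as salt.
         */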
 637        if (keylen != CHACHA_KEY_SIZE + saltlen) {
 638                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 639                return -EINVAL;
 640        }
 641
 642        ctx->cdata.key_virt = key;
 643        ctx->cdata.keylen = keylen - saltlen;
 644
 645        return chachapoly_set_sh_desc(aead);
 646}
 647
 648static int gcm_set_sh_desc(struct crypto_aead *aead)
 649{
 650        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 651        struct device *dev = ctx->dev;
 652        unsigned int ivsize = crypto_aead_ivsize(aead);
 653        struct caam_flc *flc;
 654        u32 *desc;
 655        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
 656                        ctx->cdata.keylen;
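        /*
         * Space left in the 64-word (256-byte) Descriptor h/w Buffer once
         * the Job Descriptor I/O section and the key are accounted for;
         * this decides whether the key can be inlined in the shared
         * descriptor or must be referenced through a pointer.
         */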
 657
 658        if (!ctx->cdata.keylen || !ctx->authsize)
 659                return 0;
 660
 661        /*
 662         * AES GCM encrypt shared descriptor
 663         * Job Descriptor and Shared Descriptor
 664         * must fit into the 64-word Descriptor h/w Buffer
 665         */
 666        if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
 667                ctx->cdata.key_inline = true;
 668                ctx->cdata.key_virt = ctx->key;
 669        } else {
 670                ctx->cdata.key_inline = false;
 671                ctx->cdata.key_dma = ctx->key_dma;
 672        }
 673
 674        flc = &ctx->flc[ENCRYPT];
 675        desc = flc->sh_desc;
 676        cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
 677        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 678        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 679                                   sizeof(flc->flc) + desc_bytes(desc),
 680                                   ctx->dir);
 681
 682        /*
 683         * Job Descriptor and Shared Descriptors
 684         * must all fit into the 64-word Descriptor h/w Buffer
 685         */
 686        if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
 687                ctx->cdata.key_inline = true;
 688                ctx->cdata.key_virt = ctx->key;
 689        } else {
 690                ctx->cdata.key_inline = false;
 691                ctx->cdata.key_dma = ctx->key_dma;
 692        }
 693
 694        flc = &ctx->flc[DECRYPT];
 695        desc = flc->sh_desc;
 696        cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
 697        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 698        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 699                                   sizeof(flc->flc) + desc_bytes(desc),
 700                                   ctx->dir);
 701
 702        return 0;
 703}
 704
 705static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 706{
 707        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 708        int err;
 709
 710        err = crypto_gcm_check_authsize(authsize);
 711        if (err)
 712                return err;
 713
 714        ctx->authsize = authsize;
 715        gcm_set_sh_desc(authenc);
 716
 717        return 0;
 718}
 719
 720static int gcm_setkey(struct crypto_aead *aead,
 721                      const u8 *key, unsigned int keylen)
 722{
 723        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 724        struct device *dev = ctx->dev;
 725        int ret;
 726
 727        ret = aes_check_keylen(keylen);
 728        if (ret) {
 729                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 730                return ret;
 731        }
 732        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 733                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 734
 735        memcpy(ctx->key, key, keylen);
 736        dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
 737        ctx->cdata.keylen = keylen;
 738
 739        return gcm_set_sh_desc(aead);
 740}
 741
 742static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 743{
 744        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 745        struct device *dev = ctx->dev;
 746        unsigned int ivsize = crypto_aead_ivsize(aead);
 747        struct caam_flc *flc;
 748        u32 *desc;
 749        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
 750                        ctx->cdata.keylen;
 751
 752        if (!ctx->cdata.keylen || !ctx->authsize)
 753                return 0;
 754
 755        ctx->cdata.key_virt = ctx->key;
 756
 757        /*
 758         * RFC4106 encrypt shared descriptor
 759         * Job Descriptor and Shared Descriptor
 760         * must fit into the 64-word Descriptor h/w Buffer
 761         */
 762        if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
 763                ctx->cdata.key_inline = true;
 764        } else {
 765                ctx->cdata.key_inline = false;
 766                ctx->cdata.key_dma = ctx->key_dma;
 767        }
 768
 769        flc = &ctx->flc[ENCRYPT];
 770        desc = flc->sh_desc;
 771        cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 772                                  true);
 773        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 774        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 775                                   sizeof(flc->flc) + desc_bytes(desc),
 776                                   ctx->dir);
 777
 778        /*
 779         * Job Descriptor and Shared Descriptors
 780         * must all fit into the 64-word Descriptor h/w Buffer
 781         */
 782        if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
 783                ctx->cdata.key_inline = true;
 784        } else {
 785                ctx->cdata.key_inline = false;
 786                ctx->cdata.key_dma = ctx->key_dma;
 787        }
 788
 789        flc = &ctx->flc[DECRYPT];
 790        desc = flc->sh_desc;
 791        cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 792                                  true);
 793        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 794        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 795                                   sizeof(flc->flc) + desc_bytes(desc),
 796                                   ctx->dir);
 797
 798        return 0;
 799}
 800
 801static int rfc4106_setauthsize(struct crypto_aead *authenc,
 802                               unsigned int authsize)
 803{
 804        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 805        int err;
 806
 807        err = crypto_rfc4106_check_authsize(authsize);
 808        if (err)
 809                return err;
 810
 811        ctx->authsize = authsize;
 812        rfc4106_set_sh_desc(authenc);
 813
 814        return 0;
 815}
 816
 817static int rfc4106_setkey(struct crypto_aead *aead,
 818                          const u8 *key, unsigned int keylen)
 819{
 820        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 821        struct device *dev = ctx->dev;
 822        int ret;
 823
 824        ret = aes_check_keylen(keylen - 4);
 825        if (ret) {
 826                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 827                return ret;
 828        }
 829
 830        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 831                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 832
 833        memcpy(ctx->key, key, keylen);
 834        /*
 835         * The last four bytes of the key material are used as the salt value
 836         * in the nonce. Update the AES key length.
 837         */
 838        ctx->cdata.keylen = keylen - 4;
 839        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
 840                                   ctx->dir);
 841
 842        return rfc4106_set_sh_desc(aead);
 843}
 844
 845static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 846{
 847        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 848        struct device *dev = ctx->dev;
 849        unsigned int ivsize = crypto_aead_ivsize(aead);
 850        struct caam_flc *flc;
 851        u32 *desc;
 852        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
 853                        ctx->cdata.keylen;
 854
 855        if (!ctx->cdata.keylen || !ctx->authsize)
 856                return 0;
 857
 858        ctx->cdata.key_virt = ctx->key;
 859
 860        /*
 861         * RFC4543 encrypt shared descriptor
 862         * Job Descriptor and Shared Descriptor
 863         * must fit into the 64-word Descriptor h/w Buffer
 864         */
 865        if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
 866                ctx->cdata.key_inline = true;
 867        } else {
 868                ctx->cdata.key_inline = false;
 869                ctx->cdata.key_dma = ctx->key_dma;
 870        }
 871
 872        flc = &ctx->flc[ENCRYPT];
 873        desc = flc->sh_desc;
 874        cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 875                                  true);
 876        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 877        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 878                                   sizeof(flc->flc) + desc_bytes(desc),
 879                                   ctx->dir);
 880
 881        /*
 882         * Job Descriptor and Shared Descriptors
 883         * must all fit into the 64-word Descriptor h/w Buffer
 884         */
 885        if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
 886                ctx->cdata.key_inline = true;
 887        } else {
 888                ctx->cdata.key_inline = false;
 889                ctx->cdata.key_dma = ctx->key_dma;
 890        }
 891
 892        flc = &ctx->flc[DECRYPT];
 893        desc = flc->sh_desc;
 894        cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 895                                  true);
 896        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 897        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 898                                   sizeof(flc->flc) + desc_bytes(desc),
 899                                   ctx->dir);
 900
 901        return 0;
 902}
 903
 904static int rfc4543_setauthsize(struct crypto_aead *authenc,
 905                               unsigned int authsize)
 906{
 907        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 908
 909        if (authsize != 16)
 910                return -EINVAL;
 911
 912        ctx->authsize = authsize;
 913        rfc4543_set_sh_desc(authenc);
 914
 915        return 0;
 916}
 917
 918static int rfc4543_setkey(struct crypto_aead *aead,
 919                          const u8 *key, unsigned int keylen)
 920{
 921        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 922        struct device *dev = ctx->dev;
 923        int ret;
 924
 925        ret = aes_check_keylen(keylen - 4);
 926        if (ret) {
 927                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 928                return ret;
 929        }
 930
 931        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 932                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 933
 934        memcpy(ctx->key, key, keylen);
 935        /*
 936         * The last four bytes of the key material are used as the salt value
 937         * in the nonce. Update the AES key length.
 938         */
 939        ctx->cdata.keylen = keylen - 4;
 940        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
 941                                   ctx->dir);
 942
 943        return rfc4543_set_sh_desc(aead);
 944}
 945
 946static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 947                           unsigned int keylen, const u32 ctx1_iv_off)
 948{
 949        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 950        struct caam_skcipher_alg *alg =
 951                container_of(crypto_skcipher_alg(skcipher),
 952                             struct caam_skcipher_alg, skcipher);
 953        struct device *dev = ctx->dev;
 954        struct caam_flc *flc;
 955        unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 956        u32 *desc;
 957        const bool is_rfc3686 = alg->caam.rfc3686;
 958
 959        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 960                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 961
 962        ctx->cdata.keylen = keylen;
 963        ctx->cdata.key_virt = key;
 964        ctx->cdata.key_inline = true;
 965
 966        /* skcipher_encrypt shared descriptor */
 967        flc = &ctx->flc[ENCRYPT];
 968        desc = flc->sh_desc;
 969        cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
 970                                   ctx1_iv_off);
 971        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 972        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 973                                   sizeof(flc->flc) + desc_bytes(desc),
 974                                   ctx->dir);
 975
 976        /* skcipher_decrypt shared descriptor */
 977        flc = &ctx->flc[DECRYPT];
 978        desc = flc->sh_desc;
 979        cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 980                                   ctx1_iv_off);
 981        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 982        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 983                                   sizeof(flc->flc) + desc_bytes(desc),
 984                                   ctx->dir);
 985
 986        return 0;
 987}
 988
 989static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
 990                               const u8 *key, unsigned int keylen)
 991{
 992        int err;
 993
 994        err = aes_check_keylen(keylen);
 995        if (err) {
 996                crypto_skcipher_set_flags(skcipher,
 997                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
 998                return err;
 999        }
1000
1001        return skcipher_setkey(skcipher, key, keylen, 0);
1002}
1003
1004static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
1005                                   const u8 *key, unsigned int keylen)
1006{
1007        u32 ctx1_iv_off;
1008        int err;
1009
1010        /*
1011         * RFC3686 specific:
1012         *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1013         *      | *key = {KEY, NONCE}
1014         */
1015        ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1016        keylen -= CTR_RFC3686_NONCE_SIZE;
1017
1018        err = aes_check_keylen(keylen);
1019        if (err) {
1020                crypto_skcipher_set_flags(skcipher,
1021                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
1022                return err;
1023        }
1024
1025        return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1026}
1027
1028static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1029                               const u8 *key, unsigned int keylen)
1030{
1031        u32 ctx1_iv_off;
1032        int err;
1033
1034        /*
1035         * AES-CTR needs to load IV in CONTEXT1 reg
1036         * at an offset of 128bits (16bytes)
1037         * CONTEXT1[255:128] = IV
1038         */
1039        ctx1_iv_off = 16;
1040
1041        err = aes_check_keylen(keylen);
1042        if (err) {
1043                crypto_skcipher_set_flags(skcipher,
1044                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
1045                return err;
1046        }
1047
1048        return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1049}
1050
1051static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1052                                    const u8 *key, unsigned int keylen)
1053{
1054        if (keylen != CHACHA_KEY_SIZE) {
1055                crypto_skcipher_set_flags(skcipher,
1056                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
1057                return -EINVAL;
1058        }
1059
1060        return skcipher_setkey(skcipher, key, keylen, 0);
1061}
1062
1063static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1064                               const u8 *key, unsigned int keylen)
1065{
1066        return verify_skcipher_des_key(skcipher, key) ?:
1067               skcipher_setkey(skcipher, key, keylen, 0);
1068}
1069
1070static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1071                                const u8 *key, unsigned int keylen)
1072{
1073        return verify_skcipher_des3_key(skcipher, key) ?:
1074               skcipher_setkey(skcipher, key, keylen, 0);
1075}
1076
1077static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1078                               unsigned int keylen)
1079{
1080        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1081        struct device *dev = ctx->dev;
1082        struct caam_flc *flc;
1083        u32 *desc;
1084
 1085        if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
1086                dev_err(dev, "key size mismatch\n");
1087                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1088                return -EINVAL;
1089        }
1090
1091        ctx->cdata.keylen = keylen;
1092        ctx->cdata.key_virt = key;
1093        ctx->cdata.key_inline = true;
1094
1095        /* xts_skcipher_encrypt shared descriptor */
1096        flc = &ctx->flc[ENCRYPT];
1097        desc = flc->sh_desc;
1098        cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1099        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1100        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1101                                   sizeof(flc->flc) + desc_bytes(desc),
1102                                   ctx->dir);
1103
1104        /* xts_skcipher_decrypt shared descriptor */
1105        flc = &ctx->flc[DECRYPT];
1106        desc = flc->sh_desc;
1107        cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1108        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1109        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1110                                   sizeof(flc->flc) + desc_bytes(desc),
1111                                   ctx->dir);
1112
1113        return 0;
1114}
1115
1116static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1117{
1118        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1119        struct caam_request *req_ctx = skcipher_request_ctx(req);
1120        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1121        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1122        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1123        struct device *dev = ctx->dev;
1124        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1125                       GFP_KERNEL : GFP_ATOMIC;
1126        int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1127        struct skcipher_edesc *edesc;
1128        dma_addr_t iv_dma;
1129        u8 *iv;
1130        int ivsize = crypto_skcipher_ivsize(skcipher);
1131        int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1132        struct dpaa2_sg_entry *sg_table;
1133
1134        src_nents = sg_nents_for_len(req->src, req->cryptlen);
1135        if (unlikely(src_nents < 0)) {
1136                dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1137                        req->cryptlen);
1138                return ERR_PTR(src_nents);
1139        }
1140
1141        if (unlikely(req->dst != req->src)) {
1142                dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1143                if (unlikely(dst_nents < 0)) {
1144                        dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1145                                req->cryptlen);
1146                        return ERR_PTR(dst_nents);
1147                }
1148
1149                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1150                                              DMA_TO_DEVICE);
1151                if (unlikely(!mapped_src_nents)) {
1152                        dev_err(dev, "unable to map source\n");
1153                        return ERR_PTR(-ENOMEM);
1154                }
1155
1156                mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1157                                              DMA_FROM_DEVICE);
1158                if (unlikely(!mapped_dst_nents)) {
1159                        dev_err(dev, "unable to map destination\n");
1160                        dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1161                        return ERR_PTR(-ENOMEM);
1162                }
1163        } else {
1164                mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1165                                              DMA_BIDIRECTIONAL);
1166                if (unlikely(!mapped_src_nents)) {
1167                        dev_err(dev, "unable to map source\n");
1168                        return ERR_PTR(-ENOMEM);
1169                }
1170        }
1171
1172        qm_sg_ents = 1 + mapped_src_nents;
1173        dst_sg_idx = qm_sg_ents;
1174
1175        /*
1176         * Input, output HW S/G tables: [IV, src][dst, IV]
1177         * IV entries point to the same buffer
1178         * If src == dst, S/G entries are reused (S/G tables overlap)
1179         *
1180         * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1181         * the end of the table by allocating more S/G entries.
1182         */
1183        if (req->src != req->dst)
1184                qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1185        else
1186                qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
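        /*
         * e.g. src == dst with mapped_src_nents = 3: the table holds
         * [IV, src0..src2, out IV]; the output table reuses entries 1..3,
         * hence 1 + pad_sg_nents(1 + 3) = 5 entries are reserved.
         */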
1187
1188        qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1189        if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1190                     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1191                dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1192                        qm_sg_ents, ivsize);
1193                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1194                           0, DMA_NONE, 0, 0);
1195                return ERR_PTR(-ENOMEM);
1196        }
1197
1198        /* allocate space for base edesc, link tables and IV */
1199        edesc = qi_cache_zalloc(GFP_DMA | flags);
1200        if (unlikely(!edesc)) {
1201                dev_err(dev, "could not allocate extended descriptor\n");
1202                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1203                           0, DMA_NONE, 0, 0);
1204                return ERR_PTR(-ENOMEM);
1205        }
1206
1207        /* Make sure IV is located in a DMAable area */
1208        sg_table = &edesc->sgt[0];
1209        iv = (u8 *)(sg_table + qm_sg_ents);
1210        memcpy(iv, req->iv, ivsize);
1211
1212        iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1213        if (dma_mapping_error(dev, iv_dma)) {
1214                dev_err(dev, "unable to map IV\n");
1215                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1216                           0, DMA_NONE, 0, 0);
1217                qi_cache_free(edesc);
1218                return ERR_PTR(-ENOMEM);
1219        }
1220
1221        edesc->src_nents = src_nents;
1222        edesc->dst_nents = dst_nents;
1223        edesc->iv_dma = iv_dma;
1224        edesc->qm_sg_bytes = qm_sg_bytes;
1225
1226        dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1227        sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1228
1229        if (req->src != req->dst)
1230                sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1231
1232        dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1233                         ivsize, 0);
1234
1235        edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1236                                          DMA_TO_DEVICE);
1237        if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1238                dev_err(dev, "unable to map S/G table\n");
1239                caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1240                           iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1241                qi_cache_free(edesc);
1242                return ERR_PTR(-ENOMEM);
1243        }
1244
1245        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1246        dpaa2_fl_set_final(in_fle, true);
1247        dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1248        dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1249
1250        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1251        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1252
1253        dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1254
1255        if (req->src == req->dst)
1256                dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1257                                  sizeof(*sg_table));
1258        else
1259                dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1260                                  sizeof(*sg_table));
1261
1262        return edesc;
1263}
1264
1265static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1266                       struct aead_request *req)
1267{
1268        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1269        int ivsize = crypto_aead_ivsize(aead);
1270
1271        caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1272                   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1273                   edesc->qm_sg_bytes);
1274        dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1275}
1276
1277static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1278                           struct skcipher_request *req)
1279{
1280        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1281        int ivsize = crypto_skcipher_ivsize(skcipher);
1282
1283        caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1284                   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1285                   edesc->qm_sg_bytes);
1286}
1287
1288static void aead_encrypt_done(void *cbk_ctx, u32 status)
1289{
1290        struct crypto_async_request *areq = cbk_ctx;
1291        struct aead_request *req = container_of(areq, struct aead_request,
1292                                                base);
1293        struct caam_request *req_ctx = to_caam_req(areq);
1294        struct aead_edesc *edesc = req_ctx->edesc;
1295        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1296        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1297        int ecode = 0;
1298
1299        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1300
1301        if (unlikely(status))
1302                ecode = caam_qi2_strstatus(ctx->dev, status);
1303
1304        aead_unmap(ctx->dev, edesc, req);
1305        qi_cache_free(edesc);
1306        aead_request_complete(req, ecode);
1307}
1308
1309static void aead_decrypt_done(void *cbk_ctx, u32 status)
1310{
1311        struct crypto_async_request *areq = cbk_ctx;
1312        struct aead_request *req = container_of(areq, struct aead_request,
1313                                                base);
1314        struct caam_request *req_ctx = to_caam_req(areq);
1315        struct aead_edesc *edesc = req_ctx->edesc;
1316        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1317        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1318        int ecode = 0;
1319
1320        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1321
1322        if (unlikely(status))
1323                ecode = caam_qi2_strstatus(ctx->dev, status);
1324
1325        aead_unmap(ctx->dev, edesc, req);
1326        qi_cache_free(edesc);
1327        aead_request_complete(req, ecode);
1328}
1329
1330static int aead_encrypt(struct aead_request *req)
1331{
1332        struct aead_edesc *edesc;
1333        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1334        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1335        struct caam_request *caam_req = aead_request_ctx(req);
1336        int ret;
1337
1338        /* allocate extended descriptor */
1339        edesc = aead_edesc_alloc(req, true);
1340        if (IS_ERR(edesc))
1341                return PTR_ERR(edesc);
1342
1343        caam_req->flc = &ctx->flc[ENCRYPT];
1344        caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1345        caam_req->cbk = aead_encrypt_done;
1346        caam_req->ctx = &req->base;
1347        caam_req->edesc = edesc;
1348        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1349        if (ret != -EINPROGRESS &&
1350            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1351                aead_unmap(ctx->dev, edesc, req);
1352                qi_cache_free(edesc);
1353        }
1354
1355        return ret;
1356}
1357
1358static int aead_decrypt(struct aead_request *req)
1359{
1360        struct aead_edesc *edesc;
1361        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1362        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1363        struct caam_request *caam_req = aead_request_ctx(req);
1364        int ret;
1365
1366        /* allocate extended descriptor */
1367        edesc = aead_edesc_alloc(req, false);
1368        if (IS_ERR(edesc))
1369                return PTR_ERR(edesc);
1370
1371        caam_req->flc = &ctx->flc[DECRYPT];
1372        caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1373        caam_req->cbk = aead_decrypt_done;
1374        caam_req->ctx = &req->base;
1375        caam_req->edesc = edesc;
1376        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1377        if (ret != -EINPROGRESS &&
1378            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1379                aead_unmap(ctx->dev, edesc, req);
1380                qi_cache_free(edesc);
1381        }
1382
1383        return ret;
1384}
1385
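/*
 * rfc4106/rfc4543 are the IPsec ESP flavours of AES-GCM: only the
 * associated-data lengths valid for ESP are accepted, so validate
 * req->assoclen first and only then fall through to the generic
 * aead_encrypt()/aead_decrypt() paths.
 */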
1386static int ipsec_gcm_encrypt(struct aead_request *req)
1387{
1388        return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1389}
1390
1391static int ipsec_gcm_decrypt(struct aead_request *req)
1392{
1393        return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1394}
1395
1396static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1397{
1398        struct crypto_async_request *areq = cbk_ctx;
1399        struct skcipher_request *req = skcipher_request_cast(areq);
1400        struct caam_request *req_ctx = to_caam_req(areq);
1401        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1402        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1403        struct skcipher_edesc *edesc = req_ctx->edesc;
1404        int ecode = 0;
1405        int ivsize = crypto_skcipher_ivsize(skcipher);
1406
1407        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1408
1409        if (unlikely(status))
1410                ecode = caam_qi2_strstatus(ctx->dev, status);
1411
1412        print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1413                             DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1414                             edesc->src_nents > 1 ? 100 : ivsize, 1);
1415        caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1416                     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1417                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1418
1419        skcipher_unmap(ctx->dev, edesc, req);
1420
1421        /*
1422         * The crypto API expects us to set the IV (req->iv) to the last
1423         * ciphertext block (CBC mode) or last counter (CTR mode).
1424         * Callers such as the CTS template rely on this to chain requests.
1425         */
1426        if (!ecode)
1427                memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1428                       ivsize);
1429
1430        qi_cache_free(edesc);
1431        skcipher_request_complete(req, ecode);
1432}
1433
1434static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1435{
1436        struct crypto_async_request *areq = cbk_ctx;
1437        struct skcipher_request *req = skcipher_request_cast(areq);
1438        struct caam_request *req_ctx = to_caam_req(areq);
1439        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1440        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1441        struct skcipher_edesc *edesc = req_ctx->edesc;
1442        int ecode = 0;
1443        int ivsize = crypto_skcipher_ivsize(skcipher);
1444
1445        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1446
1447        if (unlikely(status))
1448                ecode = caam_qi2_strstatus(ctx->dev, status);
1449
1450        print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
1451                             DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1452                             edesc->src_nents > 1 ? 100 : ivsize, 1);
1453        caam_dump_sg("dst    @" __stringify(__LINE__)": ",
1454                     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1455                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1456
1457        skcipher_unmap(ctx->dev, edesc, req);
1458
1459        /*
1460         * The crypto API expects us to set the IV (req->iv) to the last
1461         * ciphertext block (CBC mode) or last counter (CTR mode).
1462         * Callers such as the CTS template rely on this to chain requests.
1463         */
1464        if (!ecode)
1465                memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1466                       ivsize);
1467
1468        qi_cache_free(edesc);
1469        skcipher_request_complete(req, ecode);
1470}
1471
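/*
 * Illustrative sketch (not driver code) of why the *_done() callbacks
 * above copy the output IV back into req->iv: a caller may chain two CBC
 * requests over one logical stream. Assuming a synchronous wait between
 * the calls (completion plumbing and error handling elided):
 *
 *	skcipher_request_set_crypt(req, src1, dst1, len1, iv);
 *	crypto_skcipher_encrypt(req);	// iv now holds the last C block
 *	skcipher_request_set_crypt(req, src2, dst2, len2, iv);
 *	crypto_skcipher_encrypt(req);	// CBC chain continues seamlessly
 */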
1472static int skcipher_encrypt(struct skcipher_request *req)
1473{
1474        struct skcipher_edesc *edesc;
1475        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1476        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1477        struct caam_request *caam_req = skcipher_request_ctx(req);
1478        int ret;
1479
1480        if (!req->cryptlen)
1481                return 0;
1482
1483        /* allocate extended descriptor */
1484        edesc = skcipher_edesc_alloc(req);
1485        if (IS_ERR(edesc))
1486                return PTR_ERR(edesc);
1487
1488        caam_req->flc = &ctx->flc[ENCRYPT];
1489        caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1490        caam_req->cbk = skcipher_encrypt_done;
1491        caam_req->ctx = &req->base;
1492        caam_req->edesc = edesc;
1493        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1494        if (ret != -EINPROGRESS &&
1495            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1496                skcipher_unmap(ctx->dev, edesc, req);
1497                qi_cache_free(edesc);
1498        }
1499
1500        return ret;
1501}
1502
1503static int skcipher_decrypt(struct skcipher_request *req)
1504{
1505        struct skcipher_edesc *edesc;
1506        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1507        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
1508        struct caam_request *caam_req = skcipher_request_ctx(req);
1509        int ret;
1510
1511        if (!req->cryptlen)
1512                return 0;

1513        /* allocate extended descriptor */
1514        edesc = skcipher_edesc_alloc(req);
1515        if (IS_ERR(edesc))
1516                return PTR_ERR(edesc);
1517
1518        caam_req->flc = &ctx->flc[DECRYPT];
1519        caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1520        caam_req->cbk = skcipher_decrypt_done;
1521        caam_req->ctx = &req->base;
1522        caam_req->edesc = edesc;
1523        ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1524        if (ret != -EINPROGRESS &&
1525            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1526                skcipher_unmap(ctx->dev, edesc, req);
1527                qi_cache_free(edesc);
1528        }
1529
1530        return ret;
1531}
1532
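/*
 * caam_cra_init() performs a single dma_map_single_attrs() over the
 * contiguous [flc .. flc_dma) region of struct caam_ctx, i.e. the NUM_OP
 * flow contexts followed by the key buffer; flc_dma[i] and key_dma are
 * then derived as fixed offsets from that one base I/O address, avoiding
 * a mapping per field.
 */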
1533static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1534                         bool uses_dkp)
1535{
1536        dma_addr_t dma_addr;
1537        int i;
1538
1539        /* copy descriptor header template value */
1540        ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1541        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1542
1543        ctx->dev = caam->dev;
1544        ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1545
1546        dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1547                                        offsetof(struct caam_ctx, flc_dma),
1548                                        ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1549        if (dma_mapping_error(ctx->dev, dma_addr)) {
1550                dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1551                return -ENOMEM;
1552        }
1553
1554        for (i = 0; i < NUM_OP; i++)
1555                ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1556        ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1557
1558        return 0;
1559}
1560
1561static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1562{
1563        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1564        struct caam_skcipher_alg *caam_alg =
1565                container_of(alg, typeof(*caam_alg), skcipher);
1566
1567        crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1568        return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
1569}
1570
1571static int caam_cra_init_aead(struct crypto_aead *tfm)
1572{
1573        struct aead_alg *alg = crypto_aead_alg(tfm);
1574        struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1575                                                      aead);
1576
1577        crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1578        return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1579                             !caam_alg->caam.nodkp);
1580}
1581
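/*
 * Note: the init hooks above size the request context via
 * crypto_{skcipher,aead}_set_reqsize(), so the hotpath obtains its
 * struct caam_request from within the crypto request itself instead of
 * allocating one per operation.
 */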
1582static void caam_exit_common(struct caam_ctx *ctx)
1583{
1584        dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1585                               offsetof(struct caam_ctx, flc_dma), ctx->dir,
1586                               DMA_ATTR_SKIP_CPU_SYNC);
1587}
1588
1589static void caam_cra_exit(struct crypto_skcipher *tfm)
1590{
1591        caam_exit_common(crypto_skcipher_ctx(tfm));
1592}
1593
1594static void caam_cra_exit_aead(struct crypto_aead *tfm)
1595{
1596        caam_exit_common(crypto_aead_ctx(tfm));
1597}
1598
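/*
 * Table of skcipher algorithms exposed by this driver. For each entry,
 * .caam.class1_alg_type selects the CAAM class 1 (cipher) algorithm and
 * mode of operation used when the flow context descriptors are built at
 * setkey time.
 */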
1599static struct caam_skcipher_alg driver_algs[] = {
1600        {
1601                .skcipher = {
1602                        .base = {
1603                                .cra_name = "cbc(aes)",
1604                                .cra_driver_name = "cbc-aes-caam-qi2",
1605                                .cra_blocksize = AES_BLOCK_SIZE,
1606                        },
1607                        .setkey = aes_skcipher_setkey,
1608                        .encrypt = skcipher_encrypt,
1609                        .decrypt = skcipher_decrypt,
1610                        .min_keysize = AES_MIN_KEY_SIZE,
1611                        .max_keysize = AES_MAX_KEY_SIZE,
1612                        .ivsize = AES_BLOCK_SIZE,
1613                },
1614                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1615        },
1616        {
1617                .skcipher = {
1618                        .base = {
1619                                .cra_name = "cbc(des3_ede)",
1620                                .cra_driver_name = "cbc-3des-caam-qi2",
1621                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1622                        },
1623                        .setkey = des3_skcipher_setkey,
1624                        .encrypt = skcipher_encrypt,
1625                        .decrypt = skcipher_decrypt,
1626                        .min_keysize = DES3_EDE_KEY_SIZE,
1627                        .max_keysize = DES3_EDE_KEY_SIZE,
1628                        .ivsize = DES3_EDE_BLOCK_SIZE,
1629                },
1630                .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1631        },
1632        {
1633                .skcipher = {
1634                        .base = {
1635                                .cra_name = "cbc(des)",
1636                                .cra_driver_name = "cbc-des-caam-qi2",
1637                                .cra_blocksize = DES_BLOCK_SIZE,
1638                        },
1639                        .setkey = des_skcipher_setkey,
1640                        .encrypt = skcipher_encrypt,
1641                        .decrypt = skcipher_decrypt,
1642                        .min_keysize = DES_KEY_SIZE,
1643                        .max_keysize = DES_KEY_SIZE,
1644                        .ivsize = DES_BLOCK_SIZE,
1645                },
1646                .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1647        },
1648        {
1649                .skcipher = {
1650                        .base = {
1651                                .cra_name = "ctr(aes)",
1652                                .cra_driver_name = "ctr-aes-caam-qi2",
1653                                .cra_blocksize = 1,
1654                        },
1655                        .setkey = ctr_skcipher_setkey,
1656                        .encrypt = skcipher_encrypt,
1657                        .decrypt = skcipher_decrypt,
1658                        .min_keysize = AES_MIN_KEY_SIZE,
1659                        .max_keysize = AES_MAX_KEY_SIZE,
1660                        .ivsize = AES_BLOCK_SIZE,
1661                        .chunksize = AES_BLOCK_SIZE,
1662                },
1663                .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1664                                        OP_ALG_AAI_CTR_MOD128,
1665        },
1666        {
1667                .skcipher = {
1668                        .base = {
1669                                .cra_name = "rfc3686(ctr(aes))",
1670                                .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1671                                .cra_blocksize = 1,
1672                        },
1673                        .setkey = rfc3686_skcipher_setkey,
1674                        .encrypt = skcipher_encrypt,
1675                        .decrypt = skcipher_decrypt,
1676                        .min_keysize = AES_MIN_KEY_SIZE +
1677                                       CTR_RFC3686_NONCE_SIZE,
1678                        .max_keysize = AES_MAX_KEY_SIZE +
1679                                       CTR_RFC3686_NONCE_SIZE,
1680                        .ivsize = CTR_RFC3686_IV_SIZE,
1681                        .chunksize = AES_BLOCK_SIZE,
1682                },
1683                .caam = {
1684                        .class1_alg_type = OP_ALG_ALGSEL_AES |
1685                                           OP_ALG_AAI_CTR_MOD128,
1686                        .rfc3686 = true,
1687                },
1688        },
1689        {
1690                .skcipher = {
1691                        .base = {
1692                                .cra_name = "xts(aes)",
1693                                .cra_driver_name = "xts-aes-caam-qi2",
1694                                .cra_blocksize = AES_BLOCK_SIZE,
1695                        },
1696                        .setkey = xts_skcipher_setkey,
1697                        .encrypt = skcipher_encrypt,
1698                        .decrypt = skcipher_decrypt,
1699                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
1700                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
1701                        .ivsize = AES_BLOCK_SIZE,
1702                },
1703                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1704        },
1705        {
1706                .skcipher = {
1707                        .base = {
1708                                .cra_name = "chacha20",
1709                                .cra_driver_name = "chacha20-caam-qi2",
1710                                .cra_blocksize = 1,
1711                        },
1712                        .setkey = chacha20_skcipher_setkey,
1713                        .encrypt = skcipher_encrypt,
1714                        .decrypt = skcipher_decrypt,
1715                        .min_keysize = CHACHA_KEY_SIZE,
1716                        .max_keysize = CHACHA_KEY_SIZE,
1717                        .ivsize = CHACHA_IV_SIZE,
1718                },
1719                .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1720        },
1721};
1722
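/*
 * Table of AEAD algorithms: the AES-GCM family (rfc4106/rfc4543/gcm)
 * first, followed by the single-pass authenc(hmac(*),cbc(*)) and
 * rfc3686-CTR combinations, mostly in plain and echainiv/seqiv flavours.
 */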
1723static struct caam_aead_alg driver_aeads[] = {
1724        {
1725                .aead = {
1726                        .base = {
1727                                .cra_name = "rfc4106(gcm(aes))",
1728                                .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1729                                .cra_blocksize = 1,
1730                        },
1731                        .setkey = rfc4106_setkey,
1732                        .setauthsize = rfc4106_setauthsize,
1733                        .encrypt = ipsec_gcm_encrypt,
1734                        .decrypt = ipsec_gcm_decrypt,
1735                        .ivsize = 8,
1736                        .maxauthsize = AES_BLOCK_SIZE,
1737                },
1738                .caam = {
1739                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1740                        .nodkp = true,
1741                },
1742        },
1743        {
1744                .aead = {
1745                        .base = {
1746                                .cra_name = "rfc4543(gcm(aes))",
1747                                .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1748                                .cra_blocksize = 1,
1749                        },
1750                        .setkey = rfc4543_setkey,
1751                        .setauthsize = rfc4543_setauthsize,
1752                        .encrypt = ipsec_gcm_encrypt,
1753                        .decrypt = ipsec_gcm_decrypt,
1754                        .ivsize = 8,
1755                        .maxauthsize = AES_BLOCK_SIZE,
1756                },
1757                .caam = {
1758                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1759                        .nodkp = true,
1760                },
1761        },
1762        /* Galois Counter Mode */
1763        {
1764                .aead = {
1765                        .base = {
1766                                .cra_name = "gcm(aes)",
1767                                .cra_driver_name = "gcm-aes-caam-qi2",
1768                                .cra_blocksize = 1,
1769                        },
1770                        .setkey = gcm_setkey,
1771                        .setauthsize = gcm_setauthsize,
1772                        .encrypt = aead_encrypt,
1773                        .decrypt = aead_decrypt,
1774                        .ivsize = 12,
1775                        .maxauthsize = AES_BLOCK_SIZE,
1776                },
1777                .caam = {
1778                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1779                        .nodkp = true,
1780                }
1781        },
1782        /* single-pass ipsec_esp descriptor */
1783        {
1784                .aead = {
1785                        .base = {
1786                                .cra_name = "authenc(hmac(md5),cbc(aes))",
1787                                .cra_driver_name = "authenc-hmac-md5-"
1788                                                   "cbc-aes-caam-qi2",
1789                                .cra_blocksize = AES_BLOCK_SIZE,
1790                        },
1791                        .setkey = aead_setkey,
1792                        .setauthsize = aead_setauthsize,
1793                        .encrypt = aead_encrypt,
1794                        .decrypt = aead_decrypt,
1795                        .ivsize = AES_BLOCK_SIZE,
1796                        .maxauthsize = MD5_DIGEST_SIZE,
1797                },
1798                .caam = {
1799                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1800                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1801                                           OP_ALG_AAI_HMAC_PRECOMP,
1802                }
1803        },
1804        {
1805                .aead = {
1806                        .base = {
1807                                .cra_name = "echainiv(authenc(hmac(md5),"
1808                                            "cbc(aes)))",
1809                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
1810                                                   "cbc-aes-caam-qi2",
1811                                .cra_blocksize = AES_BLOCK_SIZE,
1812                        },
1813                        .setkey = aead_setkey,
1814                        .setauthsize = aead_setauthsize,
1815                        .encrypt = aead_encrypt,
1816                        .decrypt = aead_decrypt,
1817                        .ivsize = AES_BLOCK_SIZE,
1818                        .maxauthsize = MD5_DIGEST_SIZE,
1819                },
1820                .caam = {
1821                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1822                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1823                                           OP_ALG_AAI_HMAC_PRECOMP,
1824                        .geniv = true,
1825                }
1826        },
1827        {
1828                .aead = {
1829                        .base = {
1830                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1831                                .cra_driver_name = "authenc-hmac-sha1-"
1832                                                   "cbc-aes-caam-qi2",
1833                                .cra_blocksize = AES_BLOCK_SIZE,
1834                        },
1835                        .setkey = aead_setkey,
1836                        .setauthsize = aead_setauthsize,
1837                        .encrypt = aead_encrypt,
1838                        .decrypt = aead_decrypt,
1839                        .ivsize = AES_BLOCK_SIZE,
1840                        .maxauthsize = SHA1_DIGEST_SIZE,
1841                },
1842                .caam = {
1843                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1844                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1845                                           OP_ALG_AAI_HMAC_PRECOMP,
1846                }
1847        },
1848        {
1849                .aead = {
1850                        .base = {
1851                                .cra_name = "echainiv(authenc(hmac(sha1),"
1852                                            "cbc(aes)))",
1853                                .cra_driver_name = "echainiv-authenc-"
1854                                                   "hmac-sha1-cbc-aes-caam-qi2",
1855                                .cra_blocksize = AES_BLOCK_SIZE,
1856                        },
1857                        .setkey = aead_setkey,
1858                        .setauthsize = aead_setauthsize,
1859                        .encrypt = aead_encrypt,
1860                        .decrypt = aead_decrypt,
1861                        .ivsize = AES_BLOCK_SIZE,
1862                        .maxauthsize = SHA1_DIGEST_SIZE,
1863                },
1864                .caam = {
1865                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1866                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1867                                           OP_ALG_AAI_HMAC_PRECOMP,
1868                        .geniv = true,
1869                },
1870        },
1871        {
1872                .aead = {
1873                        .base = {
1874                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
1875                                .cra_driver_name = "authenc-hmac-sha224-"
1876                                                   "cbc-aes-caam-qi2",
1877                                .cra_blocksize = AES_BLOCK_SIZE,
1878                        },
1879                        .setkey = aead_setkey,
1880                        .setauthsize = aead_setauthsize,
1881                        .encrypt = aead_encrypt,
1882                        .decrypt = aead_decrypt,
1883                        .ivsize = AES_BLOCK_SIZE,
1884                        .maxauthsize = SHA224_DIGEST_SIZE,
1885                },
1886                .caam = {
1887                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1888                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1889                                           OP_ALG_AAI_HMAC_PRECOMP,
1890                }
1891        },
1892        {
1893                .aead = {
1894                        .base = {
1895                                .cra_name = "echainiv(authenc(hmac(sha224),"
1896                                            "cbc(aes)))",
1897                                .cra_driver_name = "echainiv-authenc-"
1898                                                   "hmac-sha224-cbc-aes-caam-qi2",
1899                                .cra_blocksize = AES_BLOCK_SIZE,
1900                        },
1901                        .setkey = aead_setkey,
1902                        .setauthsize = aead_setauthsize,
1903                        .encrypt = aead_encrypt,
1904                        .decrypt = aead_decrypt,
1905                        .ivsize = AES_BLOCK_SIZE,
1906                        .maxauthsize = SHA224_DIGEST_SIZE,
1907                },
1908                .caam = {
1909                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1910                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1911                                           OP_ALG_AAI_HMAC_PRECOMP,
1912                        .geniv = true,
1913                }
1914        },
1915        {
1916                .aead = {
1917                        .base = {
1918                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1919                                .cra_driver_name = "authenc-hmac-sha256-"
1920                                                   "cbc-aes-caam-qi2",
1921                                .cra_blocksize = AES_BLOCK_SIZE,
1922                        },
1923                        .setkey = aead_setkey,
1924                        .setauthsize = aead_setauthsize,
1925                        .encrypt = aead_encrypt,
1926                        .decrypt = aead_decrypt,
1927                        .ivsize = AES_BLOCK_SIZE,
1928                        .maxauthsize = SHA256_DIGEST_SIZE,
1929                },
1930                .caam = {
1931                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1932                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1933                                           OP_ALG_AAI_HMAC_PRECOMP,
1934                }
1935        },
1936        {
1937                .aead = {
1938                        .base = {
1939                                .cra_name = "echainiv(authenc(hmac(sha256),"
1940                                            "cbc(aes)))",
1941                                .cra_driver_name = "echainiv-authenc-"
1942                                                   "hmac-sha256-cbc-aes-"
1943                                                   "caam-qi2",
1944                                .cra_blocksize = AES_BLOCK_SIZE,
1945                        },
1946                        .setkey = aead_setkey,
1947                        .setauthsize = aead_setauthsize,
1948                        .encrypt = aead_encrypt,
1949                        .decrypt = aead_decrypt,
1950                        .ivsize = AES_BLOCK_SIZE,
1951                        .maxauthsize = SHA256_DIGEST_SIZE,
1952                },
1953                .caam = {
1954                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1955                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1956                                           OP_ALG_AAI_HMAC_PRECOMP,
1957                        .geniv = true,
1958                }
1959        },
1960        {
1961                .aead = {
1962                        .base = {
1963                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
1964                                .cra_driver_name = "authenc-hmac-sha384-"
1965                                                   "cbc-aes-caam-qi2",
1966                                .cra_blocksize = AES_BLOCK_SIZE,
1967                        },
1968                        .setkey = aead_setkey,
1969                        .setauthsize = aead_setauthsize,
1970                        .encrypt = aead_encrypt,
1971                        .decrypt = aead_decrypt,
1972                        .ivsize = AES_BLOCK_SIZE,
1973                        .maxauthsize = SHA384_DIGEST_SIZE,
1974                },
1975                .caam = {
1976                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1977                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1978                                           OP_ALG_AAI_HMAC_PRECOMP,
1979                }
1980        },
1981        {
1982                .aead = {
1983                        .base = {
1984                                .cra_name = "echainiv(authenc(hmac(sha384),"
1985                                            "cbc(aes)))",
1986                                .cra_driver_name = "echainiv-authenc-"
1987                                                   "hmac-sha384-cbc-aes-"
1988                                                   "caam-qi2",
1989                                .cra_blocksize = AES_BLOCK_SIZE,
1990                        },
1991                        .setkey = aead_setkey,
1992                        .setauthsize = aead_setauthsize,
1993                        .encrypt = aead_encrypt,
1994                        .decrypt = aead_decrypt,
1995                        .ivsize = AES_BLOCK_SIZE,
1996                        .maxauthsize = SHA384_DIGEST_SIZE,
1997                },
1998                .caam = {
1999                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2000                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2001                                           OP_ALG_AAI_HMAC_PRECOMP,
2002                        .geniv = true,
2003                }
2004        },
2005        {
2006                .aead = {
2007                        .base = {
2008                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
2009                                .cra_driver_name = "authenc-hmac-sha512-"
2010                                                   "cbc-aes-caam-qi2",
2011                                .cra_blocksize = AES_BLOCK_SIZE,
2012                        },
2013                        .setkey = aead_setkey,
2014                        .setauthsize = aead_setauthsize,
2015                        .encrypt = aead_encrypt,
2016                        .decrypt = aead_decrypt,
2017                        .ivsize = AES_BLOCK_SIZE,
2018                        .maxauthsize = SHA512_DIGEST_SIZE,
2019                },
2020                .caam = {
2021                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2022                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2023                                           OP_ALG_AAI_HMAC_PRECOMP,
2024                }
2025        },
2026        {
2027                .aead = {
2028                        .base = {
2029                                .cra_name = "echainiv(authenc(hmac(sha512),"
2030                                            "cbc(aes)))",
2031                                .cra_driver_name = "echainiv-authenc-"
2032                                                   "hmac-sha512-cbc-aes-"
2033                                                   "caam-qi2",
2034                                .cra_blocksize = AES_BLOCK_SIZE,
2035                        },
2036                        .setkey = aead_setkey,
2037                        .setauthsize = aead_setauthsize,
2038                        .encrypt = aead_encrypt,
2039                        .decrypt = aead_decrypt,
2040                        .ivsize = AES_BLOCK_SIZE,
2041                        .maxauthsize = SHA512_DIGEST_SIZE,
2042                },
2043                .caam = {
2044                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2045                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2046                                           OP_ALG_AAI_HMAC_PRECOMP,
2047                        .geniv = true,
2048                }
2049        },
2050        {
2051                .aead = {
2052                        .base = {
2053                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2054                                .cra_driver_name = "authenc-hmac-md5-"
2055                                                   "cbc-des3_ede-caam-qi2",
2056                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2057                        },
2058                        .setkey = des3_aead_setkey,
2059                        .setauthsize = aead_setauthsize,
2060                        .encrypt = aead_encrypt,
2061                        .decrypt = aead_decrypt,
2062                        .ivsize = DES3_EDE_BLOCK_SIZE,
2063                        .maxauthsize = MD5_DIGEST_SIZE,
2064                },
2065                .caam = {
2066                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2067                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2068                                           OP_ALG_AAI_HMAC_PRECOMP,
2069                }
2070        },
2071        {
2072                .aead = {
2073                        .base = {
2074                                .cra_name = "echainiv(authenc(hmac(md5),"
2075                                            "cbc(des3_ede)))",
2076                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
2077                                                   "cbc-des3_ede-caam-qi2",
2078                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2079                        },
2080                        .setkey = des3_aead_setkey,
2081                        .setauthsize = aead_setauthsize,
2082                        .encrypt = aead_encrypt,
2083                        .decrypt = aead_decrypt,
2084                        .ivsize = DES3_EDE_BLOCK_SIZE,
2085                        .maxauthsize = MD5_DIGEST_SIZE,
2086                },
2087                .caam = {
2088                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2089                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2090                                           OP_ALG_AAI_HMAC_PRECOMP,
2091                        .geniv = true,
2092                }
2093        },
2094        {
2095                .aead = {
2096                        .base = {
2097                                .cra_name = "authenc(hmac(sha1),"
2098                                            "cbc(des3_ede))",
2099                                .cra_driver_name = "authenc-hmac-sha1-"
2100                                                   "cbc-des3_ede-caam-qi2",
2101                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2102                        },
2103                        .setkey = des3_aead_setkey,
2104                        .setauthsize = aead_setauthsize,
2105                        .encrypt = aead_encrypt,
2106                        .decrypt = aead_decrypt,
2107                        .ivsize = DES3_EDE_BLOCK_SIZE,
2108                        .maxauthsize = SHA1_DIGEST_SIZE,
2109                },
2110                .caam = {
2111                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2112                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2113                                           OP_ALG_AAI_HMAC_PRECOMP,
2114                },
2115        },
2116        {
2117                .aead = {
2118                        .base = {
2119                                .cra_name = "echainiv(authenc(hmac(sha1),"
2120                                            "cbc(des3_ede)))",
2121                                .cra_driver_name = "echainiv-authenc-"
2122                                                   "hmac-sha1-"
2123                                                   "cbc-des3_ede-caam-qi2",
2124                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2125                        },
2126                        .setkey = des3_aead_setkey,
2127                        .setauthsize = aead_setauthsize,
2128                        .encrypt = aead_encrypt,
2129                        .decrypt = aead_decrypt,
2130                        .ivsize = DES3_EDE_BLOCK_SIZE,
2131                        .maxauthsize = SHA1_DIGEST_SIZE,
2132                },
2133                .caam = {
2134                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2135                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2136                                           OP_ALG_AAI_HMAC_PRECOMP,
2137                        .geniv = true,
2138                }
2139        },
2140        {
2141                .aead = {
2142                        .base = {
2143                                .cra_name = "authenc(hmac(sha224),"
2144                                            "cbc(des3_ede))",
2145                                .cra_driver_name = "authenc-hmac-sha224-"
2146                                                   "cbc-des3_ede-caam-qi2",
2147                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2148                        },
2149                        .setkey = des3_aead_setkey,
2150                        .setauthsize = aead_setauthsize,
2151                        .encrypt = aead_encrypt,
2152                        .decrypt = aead_decrypt,
2153                        .ivsize = DES3_EDE_BLOCK_SIZE,
2154                        .maxauthsize = SHA224_DIGEST_SIZE,
2155                },
2156                .caam = {
2157                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2158                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2159                                           OP_ALG_AAI_HMAC_PRECOMP,
2160                },
2161        },
2162        {
2163                .aead = {
2164                        .base = {
2165                                .cra_name = "echainiv(authenc(hmac(sha224),"
2166                                            "cbc(des3_ede)))",
2167                                .cra_driver_name = "echainiv-authenc-"
2168                                                   "hmac-sha224-"
2169                                                   "cbc-des3_ede-caam-qi2",
2170                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2171                        },
2172                        .setkey = des3_aead_setkey,
2173                        .setauthsize = aead_setauthsize,
2174                        .encrypt = aead_encrypt,
2175                        .decrypt = aead_decrypt,
2176                        .ivsize = DES3_EDE_BLOCK_SIZE,
2177                        .maxauthsize = SHA224_DIGEST_SIZE,
2178                },
2179                .caam = {
2180                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2181                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2182                                           OP_ALG_AAI_HMAC_PRECOMP,
2183                        .geniv = true,
2184                }
2185        },
2186        {
2187                .aead = {
2188                        .base = {
2189                                .cra_name = "authenc(hmac(sha256),"
2190                                            "cbc(des3_ede))",
2191                                .cra_driver_name = "authenc-hmac-sha256-"
2192                                                   "cbc-des3_ede-caam-qi2",
2193                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2194                        },
2195                        .setkey = des3_aead_setkey,
2196                        .setauthsize = aead_setauthsize,
2197                        .encrypt = aead_encrypt,
2198                        .decrypt = aead_decrypt,
2199                        .ivsize = DES3_EDE_BLOCK_SIZE,
2200                        .maxauthsize = SHA256_DIGEST_SIZE,
2201                },
2202                .caam = {
2203                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2204                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2205                                           OP_ALG_AAI_HMAC_PRECOMP,
2206                },
2207        },
2208        {
2209                .aead = {
2210                        .base = {
2211                                .cra_name = "echainiv(authenc(hmac(sha256),"
2212                                            "cbc(des3_ede)))",
2213                                .cra_driver_name = "echainiv-authenc-"
2214                                                   "hmac-sha256-"
2215                                                   "cbc-des3_ede-caam-qi2",
2216                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2217                        },
2218                        .setkey = des3_aead_setkey,
2219                        .setauthsize = aead_setauthsize,
2220                        .encrypt = aead_encrypt,
2221                        .decrypt = aead_decrypt,
2222                        .ivsize = DES3_EDE_BLOCK_SIZE,
2223                        .maxauthsize = SHA256_DIGEST_SIZE,
2224                },
2225                .caam = {
2226                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2227                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2228                                           OP_ALG_AAI_HMAC_PRECOMP,
2229                        .geniv = true,
2230                }
2231        },
2232        {
2233                .aead = {
2234                        .base = {
2235                                .cra_name = "authenc(hmac(sha384),"
2236                                            "cbc(des3_ede))",
2237                                .cra_driver_name = "authenc-hmac-sha384-"
2238                                                   "cbc-des3_ede-caam-qi2",
2239                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2240                        },
2241                        .setkey = des3_aead_setkey,
2242                        .setauthsize = aead_setauthsize,
2243                        .encrypt = aead_encrypt,
2244                        .decrypt = aead_decrypt,
2245                        .ivsize = DES3_EDE_BLOCK_SIZE,
2246                        .maxauthsize = SHA384_DIGEST_SIZE,
2247                },
2248                .caam = {
2249                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2250                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2251                                           OP_ALG_AAI_HMAC_PRECOMP,
2252                },
2253        },
2254        {
2255                .aead = {
2256                        .base = {
2257                                .cra_name = "echainiv(authenc(hmac(sha384),"
2258                                            "cbc(des3_ede)))",
2259                                .cra_driver_name = "echainiv-authenc-"
2260                                                   "hmac-sha384-"
2261                                                   "cbc-des3_ede-caam-qi2",
2262                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2263                        },
2264                        .setkey = des3_aead_setkey,
2265                        .setauthsize = aead_setauthsize,
2266                        .encrypt = aead_encrypt,
2267                        .decrypt = aead_decrypt,
2268                        .ivsize = DES3_EDE_BLOCK_SIZE,
2269                        .maxauthsize = SHA384_DIGEST_SIZE,
2270                },
2271                .caam = {
2272                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2273                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2274                                           OP_ALG_AAI_HMAC_PRECOMP,
2275                        .geniv = true,
2276                }
2277        },
2278        {
2279                .aead = {
2280                        .base = {
2281                                .cra_name = "authenc(hmac(sha512),"
2282                                            "cbc(des3_ede))",
2283                                .cra_driver_name = "authenc-hmac-sha512-"
2284                                                   "cbc-des3_ede-caam-qi2",
2285                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2286                        },
2287                        .setkey = des3_aead_setkey,
2288                        .setauthsize = aead_setauthsize,
2289                        .encrypt = aead_encrypt,
2290                        .decrypt = aead_decrypt,
2291                        .ivsize = DES3_EDE_BLOCK_SIZE,
2292                        .maxauthsize = SHA512_DIGEST_SIZE,
2293                },
2294                .caam = {
2295                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2296                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2297                                           OP_ALG_AAI_HMAC_PRECOMP,
2298                },
2299        },
2300        {
2301                .aead = {
2302                        .base = {
2303                                .cra_name = "echainiv(authenc(hmac(sha512),"
2304                                            "cbc(des3_ede)))",
2305                                .cra_driver_name = "echainiv-authenc-"
2306                                                   "hmac-sha512-"
2307                                                   "cbc-des3_ede-caam-qi2",
2308                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2309                        },
2310                        .setkey = des3_aead_setkey,
2311                        .setauthsize = aead_setauthsize,
2312                        .encrypt = aead_encrypt,
2313                        .decrypt = aead_decrypt,
2314                        .ivsize = DES3_EDE_BLOCK_SIZE,
2315                        .maxauthsize = SHA512_DIGEST_SIZE,
2316                },
2317                .caam = {
2318                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2319                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2320                                           OP_ALG_AAI_HMAC_PRECOMP,
2321                        .geniv = true,
2322                }
2323        },
2324        {
2325                .aead = {
2326                        .base = {
2327                                .cra_name = "authenc(hmac(md5),cbc(des))",
2328                                .cra_driver_name = "authenc-hmac-md5-"
2329                                                   "cbc-des-caam-qi2",
2330                                .cra_blocksize = DES_BLOCK_SIZE,
2331                        },
2332                        .setkey = aead_setkey,
2333                        .setauthsize = aead_setauthsize,
2334                        .encrypt = aead_encrypt,
2335                        .decrypt = aead_decrypt,
2336                        .ivsize = DES_BLOCK_SIZE,
2337                        .maxauthsize = MD5_DIGEST_SIZE,
2338                },
2339                .caam = {
2340                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2341                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2342                                           OP_ALG_AAI_HMAC_PRECOMP,
2343                },
2344        },
2345        {
2346                .aead = {
2347                        .base = {
2348                                .cra_name = "echainiv(authenc(hmac(md5),"
2349                                            "cbc(des)))",
2350                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
2351                                                   "cbc-des-caam-qi2",
2352                                .cra_blocksize = DES_BLOCK_SIZE,
2353                        },
2354                        .setkey = aead_setkey,
2355                        .setauthsize = aead_setauthsize,
2356                        .encrypt = aead_encrypt,
2357                        .decrypt = aead_decrypt,
2358                        .ivsize = DES_BLOCK_SIZE,
2359                        .maxauthsize = MD5_DIGEST_SIZE,
2360                },
2361                .caam = {
2362                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2363                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2364                                           OP_ALG_AAI_HMAC_PRECOMP,
2365                        .geniv = true,
2366                }
2367        },
2368        {
2369                .aead = {
2370                        .base = {
2371                                .cra_name = "authenc(hmac(sha1),cbc(des))",
2372                                .cra_driver_name = "authenc-hmac-sha1-"
2373                                                   "cbc-des-caam-qi2",
2374                                .cra_blocksize = DES_BLOCK_SIZE,
2375                        },
2376                        .setkey = aead_setkey,
2377                        .setauthsize = aead_setauthsize,
2378                        .encrypt = aead_encrypt,
2379                        .decrypt = aead_decrypt,
2380                        .ivsize = DES_BLOCK_SIZE,
2381                        .maxauthsize = SHA1_DIGEST_SIZE,
2382                },
2383                .caam = {
2384                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2385                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2386                                           OP_ALG_AAI_HMAC_PRECOMP,
2387                },
2388        },
2389        {
2390                .aead = {
2391                        .base = {
2392                                .cra_name = "echainiv(authenc(hmac(sha1),"
2393                                            "cbc(des)))",
2394                                .cra_driver_name = "echainiv-authenc-"
2395                                                   "hmac-sha1-cbc-des-caam-qi2",
2396                                .cra_blocksize = DES_BLOCK_SIZE,
2397                        },
2398                        .setkey = aead_setkey,
2399                        .setauthsize = aead_setauthsize,
2400                        .encrypt = aead_encrypt,
2401                        .decrypt = aead_decrypt,
2402                        .ivsize = DES_BLOCK_SIZE,
2403                        .maxauthsize = SHA1_DIGEST_SIZE,
2404                },
2405                .caam = {
2406                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2407                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2408                                           OP_ALG_AAI_HMAC_PRECOMP,
2409                        .geniv = true,
2410                }
2411        },
2412        {
2413                .aead = {
2414                        .base = {
2415                                .cra_name = "authenc(hmac(sha224),cbc(des))",
2416                                .cra_driver_name = "authenc-hmac-sha224-"
2417                                                   "cbc-des-caam-qi2",
2418                                .cra_blocksize = DES_BLOCK_SIZE,
2419                        },
2420                        .setkey = aead_setkey,
2421                        .setauthsize = aead_setauthsize,
2422                        .encrypt = aead_encrypt,
2423                        .decrypt = aead_decrypt,
2424                        .ivsize = DES_BLOCK_SIZE,
2425                        .maxauthsize = SHA224_DIGEST_SIZE,
2426                },
2427                .caam = {
2428                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2429                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2430                                           OP_ALG_AAI_HMAC_PRECOMP,
2431                },
2432        },
2433        {
2434                .aead = {
2435                        .base = {
2436                                .cra_name = "echainiv(authenc(hmac(sha224),"
2437                                            "cbc(des)))",
2438                                .cra_driver_name = "echainiv-authenc-"
2439                                                   "hmac-sha224-cbc-des-"
2440                                                   "caam-qi2",
2441                                .cra_blocksize = DES_BLOCK_SIZE,
2442                        },
2443                        .setkey = aead_setkey,
2444                        .setauthsize = aead_setauthsize,
2445                        .encrypt = aead_encrypt,
2446                        .decrypt = aead_decrypt,
2447                        .ivsize = DES_BLOCK_SIZE,
2448                        .maxauthsize = SHA224_DIGEST_SIZE,
2449                },
2450                .caam = {
2451                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2452                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2453                                           OP_ALG_AAI_HMAC_PRECOMP,
2454                        .geniv = true,
2455                }
2456        },
2457        {
2458                .aead = {
2459                        .base = {
2460                                .cra_name = "authenc(hmac(sha256),cbc(des))",
2461                                .cra_driver_name = "authenc-hmac-sha256-"
2462                                                   "cbc-des-caam-qi2",
2463                                .cra_blocksize = DES_BLOCK_SIZE,
2464                        },
2465                        .setkey = aead_setkey,
2466                        .setauthsize = aead_setauthsize,
2467                        .encrypt = aead_encrypt,
2468                        .decrypt = aead_decrypt,
2469                        .ivsize = DES_BLOCK_SIZE,
2470                        .maxauthsize = SHA256_DIGEST_SIZE,
2471                },
2472                .caam = {
2473                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2474                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2475                                           OP_ALG_AAI_HMAC_PRECOMP,
2476                },
2477        },
2478        {
2479                .aead = {
2480                        .base = {
2481                                .cra_name = "echainiv(authenc(hmac(sha256),"
2482                                            "cbc(des)))",
2483                                .cra_driver_name = "echainiv-authenc-"
2484                                                   "hmac-sha256-cbc-des-"
2485                                                   "caam-qi2",
2486                                .cra_blocksize = DES_BLOCK_SIZE,
2487                        },
2488                        .setkey = aead_setkey,
2489                        .setauthsize = aead_setauthsize,
2490                        .encrypt = aead_encrypt,
2491                        .decrypt = aead_decrypt,
2492                        .ivsize = DES_BLOCK_SIZE,
2493                        .maxauthsize = SHA256_DIGEST_SIZE,
2494                },
2495                .caam = {
2496                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2497                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2498                                           OP_ALG_AAI_HMAC_PRECOMP,
2499                        .geniv = true,
2500                },
2501        },
2502        {
2503                .aead = {
2504                        .base = {
2505                                .cra_name = "authenc(hmac(sha384),cbc(des))",
2506                                .cra_driver_name = "authenc-hmac-sha384-"
2507                                                   "cbc-des-caam-qi2",
2508                                .cra_blocksize = DES_BLOCK_SIZE,
2509                        },
2510                        .setkey = aead_setkey,
2511                        .setauthsize = aead_setauthsize,
2512                        .encrypt = aead_encrypt,
2513                        .decrypt = aead_decrypt,
2514                        .ivsize = DES_BLOCK_SIZE,
2515                        .maxauthsize = SHA384_DIGEST_SIZE,
2516                },
2517                .caam = {
2518                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2519                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2520                                           OP_ALG_AAI_HMAC_PRECOMP,
2521                },
2522        },
2523        {
2524                .aead = {
2525                        .base = {
2526                                .cra_name = "echainiv(authenc(hmac(sha384),"
2527                                            "cbc(des)))",
2528                                .cra_driver_name = "echainiv-authenc-"
2529                                                   "hmac-sha384-cbc-des-"
2530                                                   "caam-qi2",
2531                                .cra_blocksize = DES_BLOCK_SIZE,
2532                        },
2533                        .setkey = aead_setkey,
2534                        .setauthsize = aead_setauthsize,
2535                        .encrypt = aead_encrypt,
2536                        .decrypt = aead_decrypt,
2537                        .ivsize = DES_BLOCK_SIZE,
2538                        .maxauthsize = SHA384_DIGEST_SIZE,
2539                },
2540                .caam = {
2541                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2542                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2543                                           OP_ALG_AAI_HMAC_PRECOMP,
2544                        .geniv = true,
2545                }
2546        },
2547        {
2548                .aead = {
2549                        .base = {
2550                                .cra_name = "authenc(hmac(sha512),cbc(des))",
2551                                .cra_driver_name = "authenc-hmac-sha512-"
2552                                                   "cbc-des-caam-qi2",
2553                                .cra_blocksize = DES_BLOCK_SIZE,
2554                        },
2555                        .setkey = aead_setkey,
2556                        .setauthsize = aead_setauthsize,
2557                        .encrypt = aead_encrypt,
2558                        .decrypt = aead_decrypt,
2559                        .ivsize = DES_BLOCK_SIZE,
2560                        .maxauthsize = SHA512_DIGEST_SIZE,
2561                },
2562                .caam = {
2563                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2564                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2565                                           OP_ALG_AAI_HMAC_PRECOMP,
2566                }
2567        },
2568        {
2569                .aead = {
2570                        .base = {
2571                                .cra_name = "echainiv(authenc(hmac(sha512),"
2572                                            "cbc(des)))",
2573                                .cra_driver_name = "echainiv-authenc-"
2574                                                   "hmac-sha512-cbc-des-"
2575                                                   "caam-qi2",
2576                                .cra_blocksize = DES_BLOCK_SIZE,
2577                        },
2578                        .setkey = aead_setkey,
2579                        .setauthsize = aead_setauthsize,
2580                        .encrypt = aead_encrypt,
2581                        .decrypt = aead_decrypt,
2582                        .ivsize = DES_BLOCK_SIZE,
2583                        .maxauthsize = SHA512_DIGEST_SIZE,
2584                },
2585                .caam = {
2586                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2587                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2588                                           OP_ALG_AAI_HMAC_PRECOMP,
2589                        .geniv = true,
2590                }
2591        },
2592        {
2593                .aead = {
2594                        .base = {
2595                                .cra_name = "authenc(hmac(md5),"
2596                                            "rfc3686(ctr(aes)))",
2597                                .cra_driver_name = "authenc-hmac-md5-"
2598                                                   "rfc3686-ctr-aes-caam-qi2",
2599                                .cra_blocksize = 1,
2600                        },
2601                        .setkey = aead_setkey,
2602                        .setauthsize = aead_setauthsize,
2603                        .encrypt = aead_encrypt,
2604                        .decrypt = aead_decrypt,
2605                        .ivsize = CTR_RFC3686_IV_SIZE,
2606                        .maxauthsize = MD5_DIGEST_SIZE,
2607                },
2608                .caam = {
2609                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2610                                           OP_ALG_AAI_CTR_MOD128,
2611                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2612                                           OP_ALG_AAI_HMAC_PRECOMP,
2613                        .rfc3686 = true,
2614                },
2615        },
2616        {
2617                .aead = {
2618                        .base = {
2619                                .cra_name = "seqiv(authenc("
2620                                            "hmac(md5),rfc3686(ctr(aes))))",
2621                                .cra_driver_name = "seqiv-authenc-hmac-md5-"
2622                                                   "rfc3686-ctr-aes-caam-qi2",
2623                                .cra_blocksize = 1,
2624                        },
2625                        .setkey = aead_setkey,
2626                        .setauthsize = aead_setauthsize,
2627                        .encrypt = aead_encrypt,
2628                        .decrypt = aead_decrypt,
2629                        .ivsize = CTR_RFC3686_IV_SIZE,
2630                        .maxauthsize = MD5_DIGEST_SIZE,
2631                },
2632                .caam = {
2633                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2634                                           OP_ALG_AAI_CTR_MOD128,
2635                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2636                                           OP_ALG_AAI_HMAC_PRECOMP,
2637                        .rfc3686 = true,
2638                        .geniv = true,
2639                },
2640        },
2641        {
2642                .aead = {
2643                        .base = {
2644                                .cra_name = "authenc(hmac(sha1),"
2645                                            "rfc3686(ctr(aes)))",
2646                                .cra_driver_name = "authenc-hmac-sha1-"
2647                                                   "rfc3686-ctr-aes-caam-qi2",
2648                                .cra_blocksize = 1,
2649                        },
2650                        .setkey = aead_setkey,
2651                        .setauthsize = aead_setauthsize,
2652                        .encrypt = aead_encrypt,
2653                        .decrypt = aead_decrypt,
2654                        .ivsize = CTR_RFC3686_IV_SIZE,
2655                        .maxauthsize = SHA1_DIGEST_SIZE,
2656                },
2657                .caam = {
2658                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2659                                           OP_ALG_AAI_CTR_MOD128,
2660                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2661                                           OP_ALG_AAI_HMAC_PRECOMP,
2662                        .rfc3686 = true,
2663                },
2664        },
2665        {
2666                .aead = {
2667                        .base = {
2668                                .cra_name = "seqiv(authenc("
2669                                            "hmac(sha1),rfc3686(ctr(aes))))",
2670                                .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2671                                                   "rfc3686-ctr-aes-caam-qi2",
2672                                .cra_blocksize = 1,
2673                        },
2674                        .setkey = aead_setkey,
2675                        .setauthsize = aead_setauthsize,
2676                        .encrypt = aead_encrypt,
2677                        .decrypt = aead_decrypt,
2678                        .ivsize = CTR_RFC3686_IV_SIZE,
2679                        .maxauthsize = SHA1_DIGEST_SIZE,
2680                },
2681                .caam = {
2682                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2683                                           OP_ALG_AAI_CTR_MOD128,
2684                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2685                                           OP_ALG_AAI_HMAC_PRECOMP,
2686                        .rfc3686 = true,
2687                        .geniv = true,
2688                },
2689        },
2690        {
2691                .aead = {
2692                        .base = {
2693                                .cra_name = "authenc(hmac(sha224),"
2694                                            "rfc3686(ctr(aes)))",
2695                                .cra_driver_name = "authenc-hmac-sha224-"
2696                                                   "rfc3686-ctr-aes-caam-qi2",
2697                                .cra_blocksize = 1,
2698                        },
2699                        .setkey = aead_setkey,
2700                        .setauthsize = aead_setauthsize,
2701                        .encrypt = aead_encrypt,
2702                        .decrypt = aead_decrypt,
2703                        .ivsize = CTR_RFC3686_IV_SIZE,
2704                        .maxauthsize = SHA224_DIGEST_SIZE,
2705                },
2706                .caam = {
2707                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2708                                           OP_ALG_AAI_CTR_MOD128,
2709                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2710                                           OP_ALG_AAI_HMAC_PRECOMP,
2711                        .rfc3686 = true,
2712                },
2713        },
2714        {
2715                .aead = {
2716                        .base = {
2717                                .cra_name = "seqiv(authenc("
2718                                            "hmac(sha224),rfc3686(ctr(aes))))",
2719                                .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2720                                                   "rfc3686-ctr-aes-caam-qi2",
2721                                .cra_blocksize = 1,
2722                        },
2723                        .setkey = aead_setkey,
2724                        .setauthsize = aead_setauthsize,
2725                        .encrypt = aead_encrypt,
2726                        .decrypt = aead_decrypt,
2727                        .ivsize = CTR_RFC3686_IV_SIZE,
2728                        .maxauthsize = SHA224_DIGEST_SIZE,
2729                },
2730                .caam = {
2731                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2732                                           OP_ALG_AAI_CTR_MOD128,
2733                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2734                                           OP_ALG_AAI_HMAC_PRECOMP,
2735                        .rfc3686 = true,
2736                        .geniv = true,
2737                },
2738        },
2739        {
2740                .aead = {
2741                        .base = {
2742                                .cra_name = "authenc(hmac(sha256),"
2743                                            "rfc3686(ctr(aes)))",
2744                                .cra_driver_name = "authenc-hmac-sha256-"
2745                                                   "rfc3686-ctr-aes-caam-qi2",
2746                                .cra_blocksize = 1,
2747                        },
2748                        .setkey = aead_setkey,
2749                        .setauthsize = aead_setauthsize,
2750                        .encrypt = aead_encrypt,
2751                        .decrypt = aead_decrypt,
2752                        .ivsize = CTR_RFC3686_IV_SIZE,
2753                        .maxauthsize = SHA256_DIGEST_SIZE,
2754                },
2755                .caam = {
2756                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2757                                           OP_ALG_AAI_CTR_MOD128,
2758                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2759                                           OP_ALG_AAI_HMAC_PRECOMP,
2760                        .rfc3686 = true,
2761                },
2762        },
2763        {
2764                .aead = {
2765                        .base = {
2766                                .cra_name = "seqiv(authenc(hmac(sha256),"
2767                                            "rfc3686(ctr(aes))))",
2768                                .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2769                                                   "rfc3686-ctr-aes-caam-qi2",
2770                                .cra_blocksize = 1,
2771                        },
2772                        .setkey = aead_setkey,
2773                        .setauthsize = aead_setauthsize,
2774                        .encrypt = aead_encrypt,
2775                        .decrypt = aead_decrypt,
2776                        .ivsize = CTR_RFC3686_IV_SIZE,
2777                        .maxauthsize = SHA256_DIGEST_SIZE,
2778                },
2779                .caam = {
2780                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2781                                           OP_ALG_AAI_CTR_MOD128,
2782                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2783                                           OP_ALG_AAI_HMAC_PRECOMP,
2784                        .rfc3686 = true,
2785                        .geniv = true,
2786                },
2787        },
2788        {
2789                .aead = {
2790                        .base = {
2791                                .cra_name = "authenc(hmac(sha384),"
2792                                            "rfc3686(ctr(aes)))",
2793                                .cra_driver_name = "authenc-hmac-sha384-"
2794                                                   "rfc3686-ctr-aes-caam-qi2",
2795                                .cra_blocksize = 1,
2796                        },
2797                        .setkey = aead_setkey,
2798                        .setauthsize = aead_setauthsize,
2799                        .encrypt = aead_encrypt,
2800                        .decrypt = aead_decrypt,
2801                        .ivsize = CTR_RFC3686_IV_SIZE,
2802                        .maxauthsize = SHA384_DIGEST_SIZE,
2803                },
2804                .caam = {
2805                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2806                                           OP_ALG_AAI_CTR_MOD128,
2807                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2808                                           OP_ALG_AAI_HMAC_PRECOMP,
2809                        .rfc3686 = true,
2810                },
2811        },
2812        {
2813                .aead = {
2814                        .base = {
2815                                .cra_name = "seqiv(authenc(hmac(sha384),"
2816                                            "rfc3686(ctr(aes))))",
2817                                .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2818                                                   "rfc3686-ctr-aes-caam-qi2",
2819                                .cra_blocksize = 1,
2820                        },
2821                        .setkey = aead_setkey,
2822                        .setauthsize = aead_setauthsize,
2823                        .encrypt = aead_encrypt,
2824                        .decrypt = aead_decrypt,
2825                        .ivsize = CTR_RFC3686_IV_SIZE,
2826                        .maxauthsize = SHA384_DIGEST_SIZE,
2827                },
2828                .caam = {
2829                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2830                                           OP_ALG_AAI_CTR_MOD128,
2831                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2832                                           OP_ALG_AAI_HMAC_PRECOMP,
2833                        .rfc3686 = true,
2834                        .geniv = true,
2835                },
2836        },
2837        {
2838                .aead = {
2839                        .base = {
2840                                .cra_name = "rfc7539(chacha20,poly1305)",
2841                                .cra_driver_name = "rfc7539-chacha20-poly1305-"
2842                                                   "caam-qi2",
2843                                .cra_blocksize = 1,
2844                        },
2845                        .setkey = chachapoly_setkey,
2846                        .setauthsize = chachapoly_setauthsize,
2847                        .encrypt = aead_encrypt,
2848                        .decrypt = aead_decrypt,
2849                        .ivsize = CHACHAPOLY_IV_SIZE,
2850                        .maxauthsize = POLY1305_DIGEST_SIZE,
2851                },
2852                .caam = {
2853                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2854                                           OP_ALG_AAI_AEAD,
2855                        .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2856                                           OP_ALG_AAI_AEAD,
2857                        .nodkp = true,
2858                },
2859        },
2860        {
2861                .aead = {
2862                        .base = {
2863                                .cra_name = "rfc7539esp(chacha20,poly1305)",
2864                                .cra_driver_name = "rfc7539esp-chacha20-"
2865                                                   "poly1305-caam-qi2",
2866                                .cra_blocksize = 1,
2867                        },
2868                        .setkey = chachapoly_setkey,
2869                        .setauthsize = chachapoly_setauthsize,
2870                        .encrypt = aead_encrypt,
2871                        .decrypt = aead_decrypt,
2872                        .ivsize = 8,
2873                        .maxauthsize = POLY1305_DIGEST_SIZE,
2874                },
2875                .caam = {
2876                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2877                                           OP_ALG_AAI_AEAD,
2878                        .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2879                                           OP_ALG_AAI_AEAD,
2880                        .nodkp = true,
2881                },
2882        },
2883        {
2884                .aead = {
2885                        .base = {
2886                                .cra_name = "authenc(hmac(sha512),"
2887                                            "rfc3686(ctr(aes)))",
2888                                .cra_driver_name = "authenc-hmac-sha512-"
2889                                                   "rfc3686-ctr-aes-caam-qi2",
2890                                .cra_blocksize = 1,
2891                        },
2892                        .setkey = aead_setkey,
2893                        .setauthsize = aead_setauthsize,
2894                        .encrypt = aead_encrypt,
2895                        .decrypt = aead_decrypt,
2896                        .ivsize = CTR_RFC3686_IV_SIZE,
2897                        .maxauthsize = SHA512_DIGEST_SIZE,
2898                },
2899                .caam = {
2900                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2901                                           OP_ALG_AAI_CTR_MOD128,
2902                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2903                                           OP_ALG_AAI_HMAC_PRECOMP,
2904                        .rfc3686 = true,
2905                },
2906        },
2907        {
2908                .aead = {
2909                        .base = {
2910                                .cra_name = "seqiv(authenc(hmac(sha512),"
2911                                            "rfc3686(ctr(aes))))",
2912                                .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2913                                                   "rfc3686-ctr-aes-caam-qi2",
2914                                .cra_blocksize = 1,
2915                        },
2916                        .setkey = aead_setkey,
2917                        .setauthsize = aead_setauthsize,
2918                        .encrypt = aead_encrypt,
2919                        .decrypt = aead_decrypt,
2920                        .ivsize = CTR_RFC3686_IV_SIZE,
2921                        .maxauthsize = SHA512_DIGEST_SIZE,
2922                },
2923                .caam = {
2924                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2925                                           OP_ALG_AAI_CTR_MOD128,
2926                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2927                                           OP_ALG_AAI_HMAC_PRECOMP,
2928                        .rfc3686 = true,
2929                        .geniv = true,
2930                },
2931        },
2932};
2933
2934static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2935{
2936        struct skcipher_alg *alg = &t_alg->skcipher;
2937
2938        alg->base.cra_module = THIS_MODULE;
2939        alg->base.cra_priority = CAAM_CRA_PRIORITY;
2940        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2941        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2942
2943        alg->init = caam_cra_init_skcipher;
2944        alg->exit = caam_cra_exit;
2945}
2946
2947static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2948{
2949        struct aead_alg *alg = &t_alg->aead;
2950
2951        alg->base.cra_module = THIS_MODULE;
2952        alg->base.cra_priority = CAAM_CRA_PRIORITY;
2953        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2954        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2955
2956        alg->init = caam_cra_init_aead;
2957        alg->exit = caam_cra_exit_aead;
2958}
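
/*
 * The template entries above are consumed at probe time. A condensed,
 * illustrative sketch only (the real loop in dpaa2_caam_probe() also gates
 * each entry on the detected hardware capabilities before registering it;
 * "driver_aeads" here stands for the aead template array these entries
 * belong to):
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 *		struct caam_aead_alg *t_alg = driver_aeads + i;
 *
 *		t_alg->caam.dev = dev;
 *		caam_aead_alg_init(t_alg);
 *		if (!crypto_register_aead(&t_alg->aead))
 *			t_alg->registered = true;
 *	}
 */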
2959
2960/* max hash key is max split key size */
2961#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)
2962
2963#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
2964
2965/* caam context sizes for hashes: running digest + 8 */
2966#define HASH_MSG_LEN                    8
2967#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
2968
2969enum hash_optype {
2970        UPDATE = 0,
2971        UPDATE_FIRST,
2972        FINALIZE,
2973        DIGEST,
2974        HASH_NUM_OP
2975};
2976
2977/**
2978 * caam_hash_ctx - ahash per-session context
2979 * @flc: Flow Contexts array
2980 * @key: authentication key
2981 * @flc_dma: I/O virtual addresses of the Flow Contexts
2982 * @dev: dpseci device
2983 * @ctx_len: size of Context Register
2984 * @adata: hashing algorithm details
2985 */
2986struct caam_hash_ctx {
2987        struct caam_flc flc[HASH_NUM_OP];
2988        u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2989        dma_addr_t flc_dma[HASH_NUM_OP];
2990        struct device *dev;
2991        int ctx_len;
2992        struct alginfo adata;
2993};
2994
2995/* ahash state */
2996struct caam_hash_state {
2997        struct caam_request caam_req;
2998        dma_addr_t buf_dma;
2999        dma_addr_t ctx_dma;
3000        int ctx_dma_len;
3001        u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3002        int buflen_0;
3003        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3004        int buflen_1;
3005        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3006        int (*update)(struct ahash_request *req);
3007        int (*final)(struct ahash_request *req);
3008        int (*finup)(struct ahash_request *req);
3009        int current_buf;
3010};
3011
3012struct caam_export_state {
3013        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3014        u8 caam_ctx[MAX_CTX_LEN];
3015        int buflen;
3016        int (*update)(struct ahash_request *req);
3017        int (*final)(struct ahash_request *req);
3018        int (*finup)(struct ahash_request *req);
3019};
3020
3021static inline void switch_buf(struct caam_hash_state *state)
3022{
3023        state->current_buf ^= 1;
3024}
3025
3026static inline u8 *current_buf(struct caam_hash_state *state)
3027{
3028        return state->current_buf ? state->buf_1 : state->buf_0;
3029}
3030
3031static inline u8 *alt_buf(struct caam_hash_state *state)
3032{
3033        return state->current_buf ? state->buf_0 : state->buf_1;
3034}
3035
3036static inline int *current_buflen(struct caam_hash_state *state)
3037{
3038        return state->current_buf ? &state->buflen_1 : &state->buflen_0;
3039}
3040
3041static inline int *alt_buflen(struct caam_hash_state *state)
3042{
3043        return state->current_buf ? &state->buflen_0 : &state->buflen_1;
3044}
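
/*
 * The helpers above implement a simple ping-pong scheme between buf_0 and
 * buf_1: one buffer holds the bytes submitted to the engine for the current
 * job while the other accumulates the sub-blocksize remainder for the next
 * one, and the completion callback flips the two. A minimal usage sketch,
 * with names taken from ahash_update_ctx() / ahash_done_bi() below:
 *
 *	u8 *buf = current_buf(state);      - data going into this job
 *	u8 *next_buf = alt_buf(state);     - remainder kept for the next job
 *	...
 *	switch_buf(state);                 - in the done callback: the saved
 *	                                     remainder becomes current
 */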
3045
3046/* Map current buffer in state (if length > 0) and put it in link table */
3047static inline int buf_map_to_qm_sg(struct device *dev,
3048                                   struct dpaa2_sg_entry *qm_sg,
3049                                   struct caam_hash_state *state)
3050{
3051        int buflen = *current_buflen(state);
3052
3053        if (!buflen)
3054                return 0;
3055
3056        state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
3057                                        DMA_TO_DEVICE);
3058        if (dma_mapping_error(dev, state->buf_dma)) {
3059                dev_err(dev, "unable to map buf\n");
3060                state->buf_dma = 0;
3061                return -ENOMEM;
3062        }
3063
3064        dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3065
3066        return 0;
3067}
3068
3069/* Map state->caam_ctx, and add it to link table */
3070static inline int ctx_map_to_qm_sg(struct device *dev,
3071                                   struct caam_hash_state *state, int ctx_len,
3072                                   struct dpaa2_sg_entry *qm_sg, u32 flag)
3073{
3074        state->ctx_dma_len = ctx_len;
3075        state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3076        if (dma_mapping_error(dev, state->ctx_dma)) {
3077                dev_err(dev, "unable to map ctx\n");
3078                state->ctx_dma = 0;
3079                return -ENOMEM;
3080        }
3081
3082        dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3083
3084        return 0;
3085}
3086
3087static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3088{
3089        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3090        int digestsize = crypto_ahash_digestsize(ahash);
3091        struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3092        struct caam_flc *flc;
3093        u32 *desc;
3094
3095        /* ahash_update shared descriptor */
3096        flc = &ctx->flc[UPDATE];
3097        desc = flc->sh_desc;
3098        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3099                          ctx->ctx_len, true, priv->sec_attr.era);
3100        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3101        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3102                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
3103        print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3104                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3105                             1);
3106
3107        /* ahash_update_first shared descriptor */
3108        flc = &ctx->flc[UPDATE_FIRST];
3109        desc = flc->sh_desc;
3110        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3111                          ctx->ctx_len, false, priv->sec_attr.era);
3112        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3113        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3114                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
3115        print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3116                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3117                             1);
3118
3119        /* ahash_final shared descriptor */
3120        flc = &ctx->flc[FINALIZE];
3121        desc = flc->sh_desc;
3122        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3123                          ctx->ctx_len, true, priv->sec_attr.era);
3124        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3125        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3126                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
3127        print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3128                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3129                             1);
3130
3131        /* ahash_digest shared descriptor */
3132        flc = &ctx->flc[DIGEST];
3133        desc = flc->sh_desc;
3134        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3135                          ctx->ctx_len, false, priv->sec_attr.era);
3136        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3137        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3138                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
3139        print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3140                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3141                             1);
3142
3143        return 0;
3144}
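
/*
 * For reference, the four Flow Contexts constructed above map onto the
 * ahash entry points below roughly as follows:
 *
 *	UPDATE       - OP_ALG_AS_UPDATE,    ahash_update_ctx()
 *	UPDATE_FIRST - OP_ALG_AS_INIT,      first update (no running ctx yet)
 *	FINALIZE     - OP_ALG_AS_FINALIZE,  ahash_final_ctx(), ahash_finup_ctx()
 *	DIGEST       - OP_ALG_AS_INITFINAL, ahash_digest() (one-shot)
 */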
3145
3146struct split_key_sh_result {
3147        struct completion completion;
3148        int err;
3149        struct device *dev;
3150};
3151
3152static void split_key_sh_done(void *cbk_ctx, u32 err)
3153{
3154        struct split_key_sh_result *res = cbk_ctx;
3155
3156        dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3157
3158        res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3159        complete(&res->completion);
3160}
3161
3162/* Digest the key in place if it is longer than the block size */
3163static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3164                           u32 digestsize)
3165{
3166        struct caam_request *req_ctx;
3167        u32 *desc;
3168        struct split_key_sh_result result;
3169        dma_addr_t key_dma;
3170        struct caam_flc *flc;
3171        dma_addr_t flc_dma;
3172        int ret = -ENOMEM;
3173        struct dpaa2_fl_entry *in_fle, *out_fle;
3174
3175        req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3176        if (!req_ctx)
3177                return -ENOMEM;
3178
3179        in_fle = &req_ctx->fd_flt[1];
3180        out_fle = &req_ctx->fd_flt[0];
3181
3182        flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3183        if (!flc)
3184                goto err_flc;
3185
3186        key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3187        if (dma_mapping_error(ctx->dev, key_dma)) {
3188                dev_err(ctx->dev, "unable to map key memory\n");
3189                goto err_key_dma;
3190        }
3191
3192        desc = flc->sh_desc;
3193
3194        init_sh_desc(desc, 0);
3195
3196        /* descriptor to perform unkeyed hash on key_in */
3197        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3198                         OP_ALG_AS_INITFINAL);
3199        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3200                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3201        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3202                         LDST_SRCDST_BYTE_CONTEXT);
3203
3204        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3205        flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3206                                 desc_bytes(desc), DMA_TO_DEVICE);
3207        if (dma_mapping_error(ctx->dev, flc_dma)) {
3208                dev_err(ctx->dev, "unable to map shared descriptor\n");
3209                goto err_flc_dma;
3210        }
3211
3212        dpaa2_fl_set_final(in_fle, true);
3213        dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3214        dpaa2_fl_set_addr(in_fle, key_dma);
3215        dpaa2_fl_set_len(in_fle, *keylen);
3216        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3217        dpaa2_fl_set_addr(out_fle, key_dma);
3218        dpaa2_fl_set_len(out_fle, digestsize);
3219
3220        print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3221                             DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3222        print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3223                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3224                             1);
3225
3226        result.err = 0;
3227        init_completion(&result.completion);
3228        result.dev = ctx->dev;
3229
3230        req_ctx->flc = flc;
3231        req_ctx->flc_dma = flc_dma;
3232        req_ctx->cbk = split_key_sh_done;
3233        req_ctx->ctx = &result;
3234
3235        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3236        if (ret == -EINPROGRESS) {
3237                /* in progress */
3238                wait_for_completion(&result.completion);
3239                ret = result.err;
3240                print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3241                                     DUMP_PREFIX_ADDRESS, 16, 4, key,
3242                                     digestsize, 1);
3243        }
3244
3245        dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3246                         DMA_TO_DEVICE);
3247err_flc_dma:
3248        dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3249err_key_dma:
3250        kfree(flc);
3251err_flc:
3252        kfree(req_ctx);
3253
3254        *keylen = digestsize;
3255
3256        return ret;
3257}
3258
3259static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3260                        unsigned int keylen)
3261{
3262        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3263        unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3264        unsigned int digestsize = crypto_ahash_digestsize(ahash);
3265        int ret;
3266        u8 *hashed_key = NULL;
3267
3268        dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3269
3270        if (keylen > blocksize) {
3271                hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3272                if (!hashed_key)
3273                        return -ENOMEM;
3274                ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3275                if (ret)
3276                        goto bad_free_key;
3277                key = hashed_key;
3278        }
3279
3280        ctx->adata.keylen = keylen;
3281        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3282                                              OP_ALG_ALGSEL_MASK);
3283        if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3284                goto bad_free_key;
3285
3286        ctx->adata.key_virt = key;
3287        ctx->adata.key_inline = true;
3288
3289        /*
3290         * In case |user key| > |derived key|, using DKP<imm,imm> would result
3291         * in invalid opcodes (last bytes of user key) in the resulting
3292         * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3293         * addresses are needed.
3294         */
3295        if (keylen > ctx->adata.keylen_pad) {
3296                memcpy(ctx->key, key, keylen);
3297                dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3298                                           ctx->adata.keylen_pad,
3299                                           DMA_TO_DEVICE);
3300        }
3301
3302        ret = ahash_set_sh_desc(ahash);
3303        kfree(hashed_key);
3304        return ret;
3305bad_free_key:
3306        kfree(hashed_key);
3307        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3308        return -EINVAL;
3309}
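
/*
 * A hedged usage example: a kernel client reaches this setkey handler
 * through the generic ahash API ("hmac(sha256)" is only an example
 * algorithm name, and error handling is elided):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *
 * Keys longer than the block size take the hash_digest_key() path above
 * and are replaced by their digest before the split key is derived.
 */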
3310
3311static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3312                               struct ahash_request *req)
3313{
3314        struct caam_hash_state *state = ahash_request_ctx(req);
3315
3316        if (edesc->src_nents)
3317                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3318
3319        if (edesc->qm_sg_bytes)
3320                dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3321                                 DMA_TO_DEVICE);
3322
3323        if (state->buf_dma) {
3324                dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3325                                 DMA_TO_DEVICE);
3326                state->buf_dma = 0;
3327        }
3328}
3329
3330static inline void ahash_unmap_ctx(struct device *dev,
3331                                   struct ahash_edesc *edesc,
3332                                   struct ahash_request *req, u32 flag)
3333{
3334        struct caam_hash_state *state = ahash_request_ctx(req);
3335
3336        if (state->ctx_dma) {
3337                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3338                state->ctx_dma = 0;
3339        }
3340        ahash_unmap(dev, edesc, req);
3341}
3342
3343static void ahash_done(void *cbk_ctx, u32 status)
3344{
3345        struct crypto_async_request *areq = cbk_ctx;
3346        struct ahash_request *req = ahash_request_cast(areq);
3347        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3348        struct caam_hash_state *state = ahash_request_ctx(req);
3349        struct ahash_edesc *edesc = state->caam_req.edesc;
3350        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3351        int digestsize = crypto_ahash_digestsize(ahash);
3352        int ecode = 0;
3353
3354        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3355
3356        if (unlikely(status))
3357                ecode = caam_qi2_strstatus(ctx->dev, status);
3358
3359        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3360        memcpy(req->result, state->caam_ctx, digestsize);
3361        qi_cache_free(edesc);
3362
3363        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3364                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3365                             ctx->ctx_len, 1);
3366
3367        req->base.complete(&req->base, ecode);
3368}
3369
3370static void ahash_done_bi(void *cbk_ctx, u32 status)
3371{
3372        struct crypto_async_request *areq = cbk_ctx;
3373        struct ahash_request *req = ahash_request_cast(areq);
3374        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3375        struct caam_hash_state *state = ahash_request_ctx(req);
3376        struct ahash_edesc *edesc = state->caam_req.edesc;
3377        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3378        int ecode = 0;
3379
3380        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3381
3382        if (unlikely(status))
3383                ecode = caam_qi2_strstatus(ctx->dev, status);
3384
3385        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3386        switch_buf(state);
3387        qi_cache_free(edesc);
3388
3389        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3390                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3391                             ctx->ctx_len, 1);
3392        if (req->result)
3393                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3394                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3395                                     crypto_ahash_digestsize(ahash), 1);
3396
3397        req->base.complete(&req->base, ecode);
3398}
3399
3400static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3401{
3402        struct crypto_async_request *areq = cbk_ctx;
3403        struct ahash_request *req = ahash_request_cast(areq);
3404        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3405        struct caam_hash_state *state = ahash_request_ctx(req);
3406        struct ahash_edesc *edesc = state->caam_req.edesc;
3407        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3408        int digestsize = crypto_ahash_digestsize(ahash);
3409        int ecode = 0;
3410
3411        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3412
3413        if (unlikely(status))
3414                ecode = caam_qi2_strstatus(ctx->dev, status);
3415
3416        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3417        memcpy(req->result, state->caam_ctx, digestsize);
3418        qi_cache_free(edesc);
3419
3420        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3421                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3422                             ctx->ctx_len, 1);
3423
3424        req->base.complete(&req->base, ecode);
3425}
3426
3427static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3428{
3429        struct crypto_async_request *areq = cbk_ctx;
3430        struct ahash_request *req = ahash_request_cast(areq);
3431        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3432        struct caam_hash_state *state = ahash_request_ctx(req);
3433        struct ahash_edesc *edesc = state->caam_req.edesc;
3434        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3435        int ecode = 0;
3436
3437        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3438
3439        if (unlikely(status))
3440                ecode = caam_qi2_strstatus(ctx->dev, status);
3441
3442        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3443        switch_buf(state);
3444        qi_cache_free(edesc);
3445
3446        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3447                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3448                             ctx->ctx_len, 1);
3449        if (req->result)
3450                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3451                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3452                                     crypto_ahash_digestsize(ahash), 1);
3453
3454        req->base.complete(&req->base, ecode);
3455}
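
/*
 * The four completion callbacks above differ only in the DMA direction used
 * to unmap the context and in what happens to the output:
 *
 *	ahash_done()         - DMA_FROM_DEVICE,   digest copied to req->result
 *	ahash_done_bi()      - DMA_BIDIRECTIONAL, switch_buf() (hash continues)
 *	ahash_done_ctx_src() - DMA_BIDIRECTIONAL, digest copied to req->result
 *	ahash_done_ctx_dst() - DMA_FROM_DEVICE,   switch_buf() (hash continues)
 */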
3456
3457static int ahash_update_ctx(struct ahash_request *req)
3458{
3459        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3460        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3461        struct caam_hash_state *state = ahash_request_ctx(req);
3462        struct caam_request *req_ctx = &state->caam_req;
3463        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3464        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3465        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3466                      GFP_KERNEL : GFP_ATOMIC;
3467        u8 *buf = current_buf(state);
3468        int *buflen = current_buflen(state);
3469        u8 *next_buf = alt_buf(state);
3470        int *next_buflen = alt_buflen(state), last_buflen;
3471        int in_len = *buflen + req->nbytes, to_hash;
3472        int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3473        struct ahash_edesc *edesc;
3474        int ret = 0;
3475
3476        last_buflen = *next_buflen;
3477        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3478        to_hash = in_len - *next_buflen;
3479
3480        if (to_hash) {
3481                struct dpaa2_sg_entry *sg_table;
3482                int src_len = req->nbytes - *next_buflen;
3483
3484                src_nents = sg_nents_for_len(req->src, src_len);
3485                if (src_nents < 0) {
3486                        dev_err(ctx->dev, "Invalid number of src SG.\n");
3487                        return src_nents;
3488                }
3489
3490                if (src_nents) {
3491                        mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3492                                                  DMA_TO_DEVICE);
3493                        if (!mapped_nents) {
3494                                dev_err(ctx->dev, "unable to DMA map source\n");
3495                                return -ENOMEM;
3496                        }
3497                } else {
3498                        mapped_nents = 0;
3499                }
3500
3501                /* allocate space for base edesc and link tables */
3502                edesc = qi_cache_zalloc(GFP_DMA | flags);
3503                if (!edesc) {
3504                        dma_unmap_sg(ctx->dev, req->src, src_nents,
3505                                     DMA_TO_DEVICE);
3506                        return -ENOMEM;
3507                }
3508
3509                edesc->src_nents = src_nents;
3510                qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3511                qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3512                              sizeof(*sg_table);
3513                sg_table = &edesc->sgt[0];
3514
3515                ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3516                                       DMA_BIDIRECTIONAL);
3517                if (ret)
3518                        goto unmap_ctx;
3519
3520                ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3521                if (ret)
3522                        goto unmap_ctx;
3523
3524                if (mapped_nents) {
3525                        sg_to_qm_sg_last(req->src, src_len,
3526                                         sg_table + qm_sg_src_index, 0);
3527                        if (*next_buflen)
3528                                scatterwalk_map_and_copy(next_buf, req->src,
3529                                                         to_hash - *buflen,
3530                                                         *next_buflen, 0);
3531                } else {
3532                        dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3533                                           true);
3534                }
3535
3536                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3537                                                  qm_sg_bytes, DMA_TO_DEVICE);
3538                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3539                        dev_err(ctx->dev, "unable to map S/G table\n");
3540                        ret = -ENOMEM;
3541                        goto unmap_ctx;
3542                }
3543                edesc->qm_sg_bytes = qm_sg_bytes;
3544
3545                memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3546                dpaa2_fl_set_final(in_fle, true);
3547                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3548                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3549                dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3550                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3551                dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3552                dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3553
3554                req_ctx->flc = &ctx->flc[UPDATE];
3555                req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3556                req_ctx->cbk = ahash_done_bi;
3557                req_ctx->ctx = &req->base;
3558                req_ctx->edesc = edesc;
3559
3560                ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3561                if (ret != -EINPROGRESS &&
3562                    !(ret == -EBUSY &&
3563                      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3564                        goto unmap_ctx;
3565        } else if (*next_buflen) {
3566                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3567                                         req->nbytes, 0);
3568                *buflen = *next_buflen;
3569                *next_buflen = last_buflen;
3570        }
3571
3572        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3573                             DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3574        print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3575                             DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3576                             1);
3577
3578        return ret;
3579unmap_ctx:
3580        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3581        qi_cache_free(edesc);
3582        return ret;
3583}
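
/*
 * Worked example of the split computed at the top of ahash_update_ctx(),
 * assuming a 64-byte block size: with 10 bytes carried over (*buflen == 10)
 * and req->nbytes == 100, in_len == 110, *next_buflen == 110 & 63 == 46 and
 * to_hash == 64, so exactly one full block is hashed now and the remaining
 * 46 bytes are stashed in the alternate buffer for the next call.
 */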
3584
3585static int ahash_final_ctx(struct ahash_request *req)
3586{
3587        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3588        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3589        struct caam_hash_state *state = ahash_request_ctx(req);
3590        struct caam_request *req_ctx = &state->caam_req;
3591        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3592        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3593        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3594                      GFP_KERNEL : GFP_ATOMIC;
3595        int buflen = *current_buflen(state);
3596        int qm_sg_bytes;
3597        int digestsize = crypto_ahash_digestsize(ahash);
3598        struct ahash_edesc *edesc;
3599        struct dpaa2_sg_entry *sg_table;
3600        int ret;
3601
3602        /* allocate space for base edesc and link tables */
3603        edesc = qi_cache_zalloc(GFP_DMA | flags);
3604        if (!edesc)
3605                return -ENOMEM;
3606
3607        qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3608        sg_table = &edesc->sgt[0];
3609
3610        ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3611                               DMA_BIDIRECTIONAL);
3612        if (ret)
3613                goto unmap_ctx;
3614
3615        ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3616        if (ret)
3617                goto unmap_ctx;
3618
3619        dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3620
3621        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3622                                          DMA_TO_DEVICE);
3623        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3624                dev_err(ctx->dev, "unable to map S/G table\n");
3625                ret = -ENOMEM;
3626                goto unmap_ctx;
3627        }
3628        edesc->qm_sg_bytes = qm_sg_bytes;
3629
3630        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3631        dpaa2_fl_set_final(in_fle, true);
3632        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3633        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3634        dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3635        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3636        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3637        dpaa2_fl_set_len(out_fle, digestsize);
3638
3639        req_ctx->flc = &ctx->flc[FINALIZE];
3640        req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3641        req_ctx->cbk = ahash_done_ctx_src;
3642        req_ctx->ctx = &req->base;
3643        req_ctx->edesc = edesc;
3644
3645        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3646        if (ret == -EINPROGRESS ||
3647            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3648                return ret;
3649
3650unmap_ctx:
3651        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3652        qi_cache_free(edesc);
3653        return ret;
3654}
3655
3656static int ahash_finup_ctx(struct ahash_request *req)
3657{
3658        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3659        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3660        struct caam_hash_state *state = ahash_request_ctx(req);
3661        struct caam_request *req_ctx = &state->caam_req;
3662        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3663        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3664        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3665                      GFP_KERNEL : GFP_ATOMIC;
3666        int buflen = *current_buflen(state);
3667        int qm_sg_bytes, qm_sg_src_index;
3668        int src_nents, mapped_nents;
3669        int digestsize = crypto_ahash_digestsize(ahash);
3670        struct ahash_edesc *edesc;
3671        struct dpaa2_sg_entry *sg_table;
3672        int ret;
3673
3674        src_nents = sg_nents_for_len(req->src, req->nbytes);
3675        if (src_nents < 0) {
3676                dev_err(ctx->dev, "Invalid number of src SG.\n");
3677                return src_nents;
3678        }
3679
3680        if (src_nents) {
3681                mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3682                                          DMA_TO_DEVICE);
3683                if (!mapped_nents) {
3684                        dev_err(ctx->dev, "unable to DMA map source\n");
3685                        return -ENOMEM;
3686                }
3687        } else {
3688                mapped_nents = 0;
3689        }
3690
3691        /* allocate space for base edesc and link tables */
3692        edesc = qi_cache_zalloc(GFP_DMA | flags);
3693        if (!edesc) {
3694                dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3695                return -ENOMEM;
3696        }
3697
3698        edesc->src_nents = src_nents;
3699        qm_sg_src_index = 1 + (buflen ? 1 : 0);
3700        qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3701                      sizeof(*sg_table);
3702        sg_table = &edesc->sgt[0];
3703
3704        ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3705                               DMA_BIDIRECTIONAL);
3706        if (ret)
3707                goto unmap_ctx;
3708
3709        ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3710        if (ret)
3711                goto unmap_ctx;
3712
3713        sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3714
3715        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3716                                          DMA_TO_DEVICE);
3717        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3718                dev_err(ctx->dev, "unable to map S/G table\n");
3719                ret = -ENOMEM;
3720                goto unmap_ctx;
3721        }
3722        edesc->qm_sg_bytes = qm_sg_bytes;
3723
3724        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3725        dpaa2_fl_set_final(in_fle, true);
3726        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3727        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3728        dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3729        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3730        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3731        dpaa2_fl_set_len(out_fle, digestsize);
3732
3733        req_ctx->flc = &ctx->flc[FINALIZE];
3734        req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3735        req_ctx->cbk = ahash_done_ctx_src;
3736        req_ctx->ctx = &req->base;
3737        req_ctx->edesc = edesc;
3738
3739        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3740        if (ret == -EINPROGRESS ||
3741            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3742                return ret;
3743
3744unmap_ctx:
3745        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3746        qi_cache_free(edesc);
3747        return ret;
3748}
3749
3750static int ahash_digest(struct ahash_request *req)
3751{
3752        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3753        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3754        struct caam_hash_state *state = ahash_request_ctx(req);
3755        struct caam_request *req_ctx = &state->caam_req;
3756        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3757        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3758        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3759                      GFP_KERNEL : GFP_ATOMIC;
3760        int digestsize = crypto_ahash_digestsize(ahash);
3761        int src_nents, mapped_nents;
3762        struct ahash_edesc *edesc;
3763        int ret = -ENOMEM;
3764
3765        state->buf_dma = 0;
3766
3767        src_nents = sg_nents_for_len(req->src, req->nbytes);
3768        if (src_nents < 0) {
3769                dev_err(ctx->dev, "Invalid number of src SG.\n");
3770                return src_nents;
3771        }
3772
3773        if (src_nents) {
3774                mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3775                                          DMA_TO_DEVICE);
3776                if (!mapped_nents) {
3777                        dev_err(ctx->dev, "unable to map source for DMA\n");
3778                        return ret;
3779                }
3780        } else {
3781                mapped_nents = 0;
3782        }
3783
3784        /* allocate space for base edesc and link tables */
3785        edesc = qi_cache_zalloc(GFP_DMA | flags);
3786        if (!edesc) {
3787                dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3788                return ret;
3789        }
3790
3791        edesc->src_nents = src_nents;
3792        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3793
3794        if (mapped_nents > 1) {
3795                int qm_sg_bytes;
3796                struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3797
3798                qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3799                sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3800                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3801                                                  qm_sg_bytes, DMA_TO_DEVICE);
3802                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3803                        dev_err(ctx->dev, "unable to map S/G table\n");
3804                        goto unmap;
3805                }
3806                edesc->qm_sg_bytes = qm_sg_bytes;
3807                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3808                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3809        } else {
3810                dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3811                dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3812        }
3813
3814        state->ctx_dma_len = digestsize;
3815        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3816                                        DMA_FROM_DEVICE);
3817        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3818                dev_err(ctx->dev, "unable to map ctx\n");
3819                state->ctx_dma = 0;
3820                goto unmap;
3821        }
3822
3823        dpaa2_fl_set_final(in_fle, true);
3824        dpaa2_fl_set_len(in_fle, req->nbytes);
3825        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3826        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3827        dpaa2_fl_set_len(out_fle, digestsize);
3828
3829        req_ctx->flc = &ctx->flc[DIGEST];
3830        req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3831        req_ctx->cbk = ahash_done;
3832        req_ctx->ctx = &req->base;
3833        req_ctx->edesc = edesc;
3834        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3835        if (ret == -EINPROGRESS ||
3836            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3837                return ret;
3838
3839unmap:
3840        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3841        qi_cache_free(edesc);
3842        return ret;
3843}
3844
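    /*
     * Final step when no running context exists: only the bytes buffered in
     * the request state (if any) are hashed, reusing the DIGEST flow
     * context, with the result written straight to the request context
     * buffer.
     */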
3845static int ahash_final_no_ctx(struct ahash_request *req)
3846{
3847        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3848        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3849        struct caam_hash_state *state = ahash_request_ctx(req);
3850        struct caam_request *req_ctx = &state->caam_req;
3851        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3852        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3853        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3854                      GFP_KERNEL : GFP_ATOMIC;
3855        u8 *buf = current_buf(state);
3856        int buflen = *current_buflen(state);
3857        int digestsize = crypto_ahash_digestsize(ahash);
3858        struct ahash_edesc *edesc;
3859        int ret = -ENOMEM;
3860
3861        /* allocate space for base edesc and link tables */
3862        edesc = qi_cache_zalloc(GFP_DMA | flags);
3863        if (!edesc)
3864                return ret;
3865
3866        if (buflen) {
3867                state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3868                                                DMA_TO_DEVICE);
3869                if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3870                        dev_err(ctx->dev, "unable to map src\n");
3871                        goto unmap;
3872                }
3873        }
3874
3875        state->ctx_dma_len = digestsize;
3876        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3877                                        DMA_FROM_DEVICE);
3878        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3879                dev_err(ctx->dev, "unable to map ctx\n");
3880                state->ctx_dma = 0;
3881                goto unmap;
3882        }
3883
3884        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3885        dpaa2_fl_set_final(in_fle, true);
3886        /*
3887         * The crypto engine requires the input entry to be present when
3888         * the "frame list" FD format is used. Since the engine does not
3889         * support FMT=2'b11 (unused entry type), leaving in_fle zeroized
3890         * (except for the "Final" flag) is the best option.
3891         */
3892        if (buflen) {
3893                dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3894                dpaa2_fl_set_addr(in_fle, state->buf_dma);
3895                dpaa2_fl_set_len(in_fle, buflen);
3896        }
3897        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3898        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3899        dpaa2_fl_set_len(out_fle, digestsize);
3900
3901        req_ctx->flc = &ctx->flc[DIGEST];
3902        req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3903        req_ctx->cbk = ahash_done;
3904        req_ctx->ctx = &req->base;
3905        req_ctx->edesc = edesc;
3906
3907        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3908        if (ret == -EINPROGRESS ||
3909            (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3910                return ret;
3911
3912unmap:
3913        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3914        qi_cache_free(edesc);
3915        return ret;
3916}
3917
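    /*
     * Update before a running context exists: data is accumulated until at
     * least one full block is available; complete blocks (buffered bytes
     * first, then request data, chained through a QM S/G table) are hashed
     * via UPDATE_FIRST, and the state handlers switch to the *_ctx variants
     * since a running context exists from now on.
     */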
3918static int ahash_update_no_ctx(struct ahash_request *req)
3919{
3920        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3921        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3922        struct caam_hash_state *state = ahash_request_ctx(req);
3923        struct caam_request *req_ctx = &state->caam_req;
3924        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3925        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3926        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3927                      GFP_KERNEL : GFP_ATOMIC;
3928        u8 *buf = current_buf(state);
3929        int *buflen = current_buflen(state);
3930        u8 *next_buf = alt_buf(state);
3931        int *next_buflen = alt_buflen(state);
3932        int in_len = *buflen + req->nbytes, to_hash;
3933        int qm_sg_bytes, src_nents, mapped_nents;
3934        struct ahash_edesc *edesc;
3935        int ret = 0;
3936
3937        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3938        to_hash = in_len - *next_buflen;
3939
3940        if (to_hash) {
3941                struct dpaa2_sg_entry *sg_table;
3942                int src_len = req->nbytes - *next_buflen;
3943
3944                src_nents = sg_nents_for_len(req->src, src_len);
3945                if (src_nents < 0) {
3946                        dev_err(ctx->dev, "Invalid number of src SG.\n");
3947                        return src_nents;
3948                }
3949
3950                if (src_nents) {
3951                        mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3952                                                  DMA_TO_DEVICE);
3953                        if (!mapped_nents) {
3954                                dev_err(ctx->dev, "unable to DMA map source\n");
3955                                return -ENOMEM;
3956                        }
3957                } else {
3958                        mapped_nents = 0;
3959                }
3960
3961                /* allocate space for base edesc and link tables */
3962                edesc = qi_cache_zalloc(GFP_DMA | flags);
3963                if (!edesc) {
3964                        dma_unmap_sg(ctx->dev, req->src, src_nents,
3965                                     DMA_TO_DEVICE);
3966                        return -ENOMEM;
3967                }
3968
3969                edesc->src_nents = src_nents;
3970                qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3971                              sizeof(*sg_table);
3972                sg_table = &edesc->sgt[0];
3973
3974                ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3975                if (ret)
3976                        goto unmap_ctx;
3977
3978                sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3979
3980                if (*next_buflen)
3981                        scatterwalk_map_and_copy(next_buf, req->src,
3982                                                 to_hash - *buflen,
3983                                                 *next_buflen, 0);
3984
3985                edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3986                                                  qm_sg_bytes, DMA_TO_DEVICE);
3987                if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3988                        dev_err(ctx->dev, "unable to map S/G table\n");
3989                        ret = -ENOMEM;
3990                        goto unmap_ctx;
3991                }
3992                edesc->qm_sg_bytes = qm_sg_bytes;
3993
3994                state->ctx_dma_len = ctx->ctx_len;
3995                state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3996                                                ctx->ctx_len, DMA_FROM_DEVICE);
3997                if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3998                        dev_err(ctx->dev, "unable to map ctx\n");
3999                        state->ctx_dma = 0;
4000                        ret = -ENOMEM;
4001                        goto unmap_ctx;
4002                }
4003
4004                memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4005                dpaa2_fl_set_final(in_fle, true);
4006                dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4007                dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4008                dpaa2_fl_set_len(in_fle, to_hash);
4009                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4010                dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4011                dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4012
4013                req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4014                req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4015                req_ctx->cbk = ahash_done_ctx_dst;
4016                req_ctx->ctx = &req->base;
4017                req_ctx->edesc = edesc;
4018
4019                ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4020                if (ret != -EINPROGRESS &&
4021                    !(ret == -EBUSY &&
4022                      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4023                        goto unmap_ctx;
4024
4025                state->update = ahash_update_ctx;
4026                state->finup = ahash_finup_ctx;
4027                state->final = ahash_final_ctx;
4028        } else if (*next_buflen) {
4029                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4030                                         req->nbytes, 0);
4031                *buflen = *next_buflen;
4032                *next_buflen = 0;
4033        }
4034
4035        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4036                             DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
4037        print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4038                             DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4039                             1);
4040
4041        return ret;
4042unmap_ctx:
4043        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4044        qi_cache_free(edesc);
4045        return ret;
4046}
4047
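    /*
     * finup before a running context exists: the buffered bytes and the
     * request data are chained into a single QM S/G table and hashed in one
     * DIGEST operation.
     */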
4048static int ahash_finup_no_ctx(struct ahash_request *req)
4049{
4050        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4051        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4052        struct caam_hash_state *state = ahash_request_ctx(req);
4053        struct caam_request *req_ctx = &state->caam_req;
4054        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4055        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4056        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4057                      GFP_KERNEL : GFP_ATOMIC;
4058        int buflen = *current_buflen(state);
4059        int qm_sg_bytes, src_nents, mapped_nents;
4060        int digestsize = crypto_ahash_digestsize(ahash);
4061        struct ahash_edesc *edesc;
4062        struct dpaa2_sg_entry *sg_table;
4063        int ret;
4064
4065        src_nents = sg_nents_for_len(req->src, req->nbytes);
4066        if (src_nents < 0) {
4067                dev_err(ctx->dev, "Invalid number of src SG.\n");
4068                return src_nents;
4069        }
4070
4071        if (src_nents) {
4072                mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4073                                          DMA_TO_DEVICE);
4074                if (!mapped_nents) {
4075                        dev_err(ctx->dev, "unable to DMA map source\n");
4076                        return -ENOMEM;
4077                }
4078        } else {
4079                mapped_nents = 0;
4080        }
4081
4082        /* allocate space for base edesc and link tables */
4083        edesc = qi_cache_zalloc(GFP_DMA | flags);
4084        if (!edesc) {
4085                dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4086                return -ENOMEM;
4087        }
4088
4089        edesc->src_nents = src_nents;
4090        qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4091        sg_table = &edesc->sgt[0];
4092
4093        ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4094        if (ret)
4095                goto unmap;
4096
4097        sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4098
4099        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4100                                          DMA_TO_DEVICE);
4101        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4102                dev_err(ctx->dev, "unable to map S/G table\n");
4103                ret = -ENOMEM;
4104                goto unmap;
4105        }
4106        edesc->qm_sg_bytes = qm_sg_bytes;
4107
4108        state->ctx_dma_len = digestsize;
4109        state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4110                                        DMA_FROM_DEVICE);
4111        if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4112                dev_err(ctx->dev, "unable to map ctx\n");
4113                state->ctx_dma = 0;
4114                ret = -ENOMEM;
4115                goto unmap;
4116        }
4117
4118        memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4119        dpaa2_fl_set_final(in_fle, true);
4120        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4121        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4122        dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4123        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4124        dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4125        dpaa2_fl_set_len(out_fle, digestsize);
4126
4127        req_ctx->flc = &ctx->flc[DIGEST];
4128        req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4129        req_ctx->cbk = ahash_done;
4130        req_ctx->ctx = &req->base;
4131        req_ctx->edesc = edesc;
4132        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4133        if (ret != -EINPROGRESS &&
4134            !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4135                goto unmap;
4136
4137        return ret;
4138unmap:
4139        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4140        qi_cache_free(edesc);
4141        return ret;
4142}
4143
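    /*
     * First update on a freshly initialized request: full blocks are hashed
     * with UPDATE_FIRST (creating the running context) and the sub-block
     * remainder is stashed in the alternate buffer; the state handlers then
     * move to the *_ctx or *_no_ctx variants, depending on whether anything
     * was actually sent to the engine.
     */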
4144static int ahash_update_first(struct ahash_request *req)
4145{
4146        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4147        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4148        struct caam_hash_state *state = ahash_request_ctx(req);
4149        struct caam_request *req_ctx = &state->caam_req;
4150        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4151        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4152        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4153                      GFP_KERNEL : GFP_ATOMIC;
4154        u8 *next_buf = alt_buf(state);
4155        int *next_buflen = alt_buflen(state);
4156        int to_hash;
4157        int src_nents, mapped_nents;
4158        struct ahash_edesc *edesc;
4159        int ret = 0;
4160
4161        *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4162                                      1);
4163        to_hash = req->nbytes - *next_buflen;
4164
4165        if (to_hash) {
4166                struct dpaa2_sg_entry *sg_table;
4167                int src_len = req->nbytes - *next_buflen;
4168
4169                src_nents = sg_nents_for_len(req->src, src_len);
4170                if (src_nents < 0) {
4171                        dev_err(ctx->dev, "Invalid number of src SG.\n");
4172                        return src_nents;
4173                }
4174
4175                if (src_nents) {
4176                        mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4177                                                  DMA_TO_DEVICE);
4178                        if (!mapped_nents) {
4179                                dev_err(ctx->dev, "unable to map source for DMA\n");
4180                                return -ENOMEM;
4181                        }
4182                } else {
4183                        mapped_nents = 0;
4184                }
4185
4186                /* allocate space for base edesc and link tables */
4187                edesc = qi_cache_zalloc(GFP_DMA | flags);
4188                if (!edesc) {
4189                        dma_unmap_sg(ctx->dev, req->src, src_nents,
4190                                     DMA_TO_DEVICE);
4191                        return -ENOMEM;
4192                }
4193
4194                edesc->src_nents = src_nents;
4195                sg_table = &edesc->sgt[0];
4196
4197                memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4198                dpaa2_fl_set_final(in_fle, true);
4199                dpaa2_fl_set_len(in_fle, to_hash);
4200
4201                if (mapped_nents > 1) {
4202                        int qm_sg_bytes;
4203
4204                        sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4205                        qm_sg_bytes = pad_sg_nents(mapped_nents) *
4206                                      sizeof(*sg_table);
4207                        edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4208                                                          qm_sg_bytes,
4209                                                          DMA_TO_DEVICE);
4210                        if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4211                                dev_err(ctx->dev, "unable to map S/G table\n");
4212                                ret = -ENOMEM;
4213                                goto unmap_ctx;
4214                        }
4215                        edesc->qm_sg_bytes = qm_sg_bytes;
4216                        dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4217                        dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4218                } else {
4219                        dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4220                        dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4221                }
4222
4223                if (*next_buflen)
4224                        scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4225                                                 *next_buflen, 0);
4226
4227                state->ctx_dma_len = ctx->ctx_len;
4228                state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4229                                                ctx->ctx_len, DMA_FROM_DEVICE);
4230                if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4231                        dev_err(ctx->dev, "unable to map ctx\n");
4232                        state->ctx_dma = 0;
4233                        ret = -ENOMEM;
4234                        goto unmap_ctx;
4235                }
4236
4237                dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4238                dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4239                dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4240
4241                req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4242                req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4243                req_ctx->cbk = ahash_done_ctx_dst;
4244                req_ctx->ctx = &req->base;
4245                req_ctx->edesc = edesc;
4246
4247                ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4248                if (ret != -EINPROGRESS &&
4249                    !(ret == -EBUSY && req->base.flags &
4250                      CRYPTO_TFM_REQ_MAY_BACKLOG))
4251                        goto unmap_ctx;
4252
4253                state->update = ahash_update_ctx;
4254                state->finup = ahash_finup_ctx;
4255                state->final = ahash_final_ctx;
4256        } else if (*next_buflen) {
4257                state->update = ahash_update_no_ctx;
4258                state->finup = ahash_finup_no_ctx;
4259                state->final = ahash_final_no_ctx;
4260                scatterwalk_map_and_copy(next_buf, req->src, 0,
4261                                         req->nbytes, 0);
4262                switch_buf(state);
4263        }
4264
4265        print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4266                             DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4267                             1);
4268
4269        return ret;
4270unmap_ctx:
4271        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4272        qi_cache_free(edesc);
4273        return ret;
4274}
4275
4276static int ahash_finup_first(struct ahash_request *req)
4277{
4278        return ahash_digest(req);
4279}
4280
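    /*
     * The entry points below dispatch through function pointers kept in the
     * request state, so the same crypto API call lands on a different
     * implementation depending on how much data has been seen so far.
     * A minimal consumer sketch, using the generic kernel ahash API (none of
     * these helpers are defined in this file; error handling elided):
     *
     *        struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
     *        struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
     *
     *        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
     *                                   cb, arg);
     *        ahash_request_set_crypt(req, src_sg, digest, nbytes);
     *        crypto_ahash_digest(req);    => ends up in ahash_digest() above
     */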
4281static int ahash_init(struct ahash_request *req)
4282{
4283        struct caam_hash_state *state = ahash_request_ctx(req);
4284
4285        state->update = ahash_update_first;
4286        state->finup = ahash_finup_first;
4287        state->final = ahash_final_no_ctx;
4288
4289        state->ctx_dma = 0;
4290        state->ctx_dma_len = 0;
4291        state->current_buf = 0;
4292        state->buf_dma = 0;
4293        state->buflen_0 = 0;
4294        state->buflen_1 = 0;
4295
4296        return 0;
4297}
4298
4299static int ahash_update(struct ahash_request *req)
4300{
4301        struct caam_hash_state *state = ahash_request_ctx(req);
4302
4303        return state->update(req);
4304}
4305
4306static int ahash_finup(struct ahash_request *req)
4307{
4308        struct caam_hash_state *state = ahash_request_ctx(req);
4309
4310        return state->finup(req);
4311}
4312
4313static int ahash_final(struct ahash_request *req)
4314{
4315        struct caam_hash_state *state = ahash_request_ctx(req);
4316
4317        return state->final(req);
4318}
4319
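    /*
     * export/import serialize the partial hash state (pending buffer,
     * running context and stage handlers) so a hash operation can be
     * suspended and later resumed, possibly on a different request.
     */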
4320static int ahash_export(struct ahash_request *req, void *out)
4321{
4322        struct caam_hash_state *state = ahash_request_ctx(req);
4323        struct caam_export_state *export = out;
4324        int len;
4325        u8 *buf;
4326
4327        if (state->current_buf) {
4328                buf = state->buf_1;
4329                len = state->buflen_1;
4330        } else {
4331                buf = state->buf_0;
4332                len = state->buflen_0;
4333        }
4334
4335        memcpy(export->buf, buf, len);
4336        memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4337        export->buflen = len;
4338        export->update = state->update;
4339        export->final = state->final;
4340        export->finup = state->finup;
4341
4342        return 0;
4343}
4344
4345static int ahash_import(struct ahash_request *req, const void *in)
4346{
4347        struct caam_hash_state *state = ahash_request_ctx(req);
4348        const struct caam_export_state *export = in;
4349
4350        memset(state, 0, sizeof(*state));
4351        memcpy(state->buf_0, export->buf, export->buflen);
4352        memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4353        state->buflen_0 = export->buflen;
4354        state->update = export->update;
4355        state->final = export->final;
4356        state->finup = export->finup;
4357
4358        return 0;
4359}
4360
4361struct caam_hash_template {
4362        char name[CRYPTO_MAX_ALG_NAME];
4363        char driver_name[CRYPTO_MAX_ALG_NAME];
4364        char hmac_name[CRYPTO_MAX_ALG_NAME];
4365        char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4366        unsigned int blocksize;
4367        struct ahash_alg template_ahash;
4368        u32 alg_type;
4369};
4370
4371/* ahash algorithm templates, registered as both hash and hmac(hash) */
4372static struct caam_hash_template driver_hash[] = {
4373        {
4374                .name = "sha1",
4375                .driver_name = "sha1-caam-qi2",
4376                .hmac_name = "hmac(sha1)",
4377                .hmac_driver_name = "hmac-sha1-caam-qi2",
4378                .blocksize = SHA1_BLOCK_SIZE,
4379                .template_ahash = {
4380                        .init = ahash_init,
4381                        .update = ahash_update,
4382                        .final = ahash_final,
4383                        .finup = ahash_finup,
4384                        .digest = ahash_digest,
4385                        .export = ahash_export,
4386                        .import = ahash_import,
4387                        .setkey = ahash_setkey,
4388                        .halg = {
4389                                .digestsize = SHA1_DIGEST_SIZE,
4390                                .statesize = sizeof(struct caam_export_state),
4391                        },
4392                },
4393                .alg_type = OP_ALG_ALGSEL_SHA1,
4394        }, {
4395                .name = "sha224",
4396                .driver_name = "sha224-caam-qi2",
4397                .hmac_name = "hmac(sha224)",
4398                .hmac_driver_name = "hmac-sha224-caam-qi2",
4399                .blocksize = SHA224_BLOCK_SIZE,
4400                .template_ahash = {
4401                        .init = ahash_init,
4402                        .update = ahash_update,
4403                        .final = ahash_final,
4404                        .finup = ahash_finup,
4405                        .digest = ahash_digest,
4406                        .export = ahash_export,
4407                        .import = ahash_import,
4408                        .setkey = ahash_setkey,
4409                        .halg = {
4410                                .digestsize = SHA224_DIGEST_SIZE,
4411                                .statesize = sizeof(struct caam_export_state),
4412                        },
4413                },
4414                .alg_type = OP_ALG_ALGSEL_SHA224,
4415        }, {
4416                .name = "sha256",
4417                .driver_name = "sha256-caam-qi2",
4418                .hmac_name = "hmac(sha256)",
4419                .hmac_driver_name = "hmac-sha256-caam-qi2",
4420                .blocksize = SHA256_BLOCK_SIZE,
4421                .template_ahash = {
4422                        .init = ahash_init,
4423                        .update = ahash_update,
4424                        .final = ahash_final,
4425                        .finup = ahash_finup,
4426                        .digest = ahash_digest,
4427                        .export = ahash_export,
4428                        .import = ahash_import,
4429                        .setkey = ahash_setkey,
4430                        .halg = {
4431                                .digestsize = SHA256_DIGEST_SIZE,
4432                                .statesize = sizeof(struct caam_export_state),
4433                        },
4434                },
4435                .alg_type = OP_ALG_ALGSEL_SHA256,
4436        }, {
4437                .name = "sha384",
4438                .driver_name = "sha384-caam-qi2",
4439                .hmac_name = "hmac(sha384)",
4440                .hmac_driver_name = "hmac-sha384-caam-qi2",
4441                .blocksize = SHA384_BLOCK_SIZE,
4442                .template_ahash = {
4443                        .init = ahash_init,
4444                        .update = ahash_update,
4445                        .final = ahash_final,
4446                        .finup = ahash_finup,
4447                        .digest = ahash_digest,
4448                        .export = ahash_export,
4449                        .import = ahash_import,
4450                        .setkey = ahash_setkey,
4451                        .halg = {
4452                                .digestsize = SHA384_DIGEST_SIZE,
4453                                .statesize = sizeof(struct caam_export_state),
4454                        },
4455                },
4456                .alg_type = OP_ALG_ALGSEL_SHA384,
4457        }, {
4458                .name = "sha512",
4459                .driver_name = "sha512-caam-qi2",
4460                .hmac_name = "hmac(sha512)",
4461                .hmac_driver_name = "hmac-sha512-caam-qi2",
4462                .blocksize = SHA512_BLOCK_SIZE,
4463                .template_ahash = {
4464                        .init = ahash_init,
4465                        .update = ahash_update,
4466                        .final = ahash_final,
4467                        .finup = ahash_finup,
4468                        .digest = ahash_digest,
4469                        .export = ahash_export,
4470                        .import = ahash_import,
4471                        .setkey = ahash_setkey,
4472                        .halg = {
4473                                .digestsize = SHA512_DIGEST_SIZE,
4474                                .statesize = sizeof(struct caam_export_state),
4475                        },
4476                },
4477                .alg_type = OP_ALG_ALGSEL_SHA512,
4478        }, {
4479                .name = "md5",
4480                .driver_name = "md5-caam-qi2",
4481                .hmac_name = "hmac(md5)",
4482                .hmac_driver_name = "hmac-md5-caam-qi2",
4483                .blocksize = MD5_BLOCK_WORDS * 4,
4484                .template_ahash = {
4485                        .init = ahash_init,
4486                        .update = ahash_update,
4487                        .final = ahash_final,
4488                        .finup = ahash_finup,
4489                        .digest = ahash_digest,
4490                        .export = ahash_export,
4491                        .import = ahash_import,
4492                        .setkey = ahash_setkey,
4493                        .halg = {
4494                                .digestsize = MD5_DIGEST_SIZE,
4495                                .statesize = sizeof(struct caam_export_state),
4496                        },
4497                },
4498                .alg_type = OP_ALG_ALGSEL_MD5,
4499        }
4500};
4501
4502struct caam_hash_alg {
4503        struct list_head entry;
4504        struct device *dev;
4505        int alg_type;
4506        struct ahash_alg ahash_alg;
4507};
4508
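    /*
     * Per-tfm setup: DMA-map the (optional) split key and the flow context
     * array, derive the MDHA running-digest length for the selected
     * algorithm and build the shared descriptors.
     */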
4509static int caam_hash_cra_init(struct crypto_tfm *tfm)
4510{
4511        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4512        struct crypto_alg *base = tfm->__crt_alg;
4513        struct hash_alg_common *halg =
4514                 container_of(base, struct hash_alg_common, base);
4515        struct ahash_alg *alg =
4516                 container_of(halg, struct ahash_alg, halg);
4517        struct caam_hash_alg *caam_hash =
4518                 container_of(alg, struct caam_hash_alg, ahash_alg);
4519        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4520        /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4521        static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4522                                         HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4523                                         HASH_MSG_LEN + 32,
4524                                         HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4525                                         HASH_MSG_LEN + 64,
4526                                         HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4527        dma_addr_t dma_addr;
4528        int i;
4529
4530        ctx->dev = caam_hash->dev;
4531
4532        if (alg->setkey) {
4533                ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4534                                                          ARRAY_SIZE(ctx->key),
4535                                                          DMA_TO_DEVICE,
4536                                                          DMA_ATTR_SKIP_CPU_SYNC);
4537                if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4538                        dev_err(ctx->dev, "unable to map key\n");
4539                        return -ENOMEM;
4540                }
4541        }
4542
4543        dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4544                                        DMA_BIDIRECTIONAL,
4545                                        DMA_ATTR_SKIP_CPU_SYNC);
4546        if (dma_mapping_error(ctx->dev, dma_addr)) {
4547                dev_err(ctx->dev, "unable to map shared descriptors\n");
4548                if (ctx->adata.key_dma)
4549                        dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4550                                               ARRAY_SIZE(ctx->key),
4551                                               DMA_TO_DEVICE,
4552                                               DMA_ATTR_SKIP_CPU_SYNC);
4553                return -ENOMEM;
4554        }
4555
4556        for (i = 0; i < HASH_NUM_OP; i++)
4557                ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4558
4559        /* copy descriptor header template value */
4560        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4561
4562        ctx->ctx_len = runninglen[(ctx->adata.algtype &
4563                                   OP_ALG_ALGSEL_SUBMASK) >>
4564                                  OP_ALG_ALGSEL_SHIFT];
4565
4566        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4567                                 sizeof(struct caam_hash_state));
4568
4569        return ahash_set_sh_desc(ahash);
4570}
4571
4572static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4573{
4574        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4575
4576        dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4577                               DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4578        if (ctx->adata.key_dma)
4579                dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4580                                       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4581                                       DMA_ATTR_SKIP_CPU_SYNC);
4582}
4583
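    /*
     * Instantiate one entry of the template table, either as the keyed
     * hmac(...) variant or as the plain hash (in which case .setkey is
     * cleared so the crypto core treats the algorithm as keyless).
     */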
4584static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4585        struct caam_hash_template *template, bool keyed)
4586{
4587        struct caam_hash_alg *t_alg;
4588        struct ahash_alg *halg;
4589        struct crypto_alg *alg;
4590
4591        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4592        if (!t_alg)
4593                return ERR_PTR(-ENOMEM);
4594
4595        t_alg->ahash_alg = template->template_ahash;
4596        halg = &t_alg->ahash_alg;
4597        alg = &halg->halg.base;
4598
4599        if (keyed) {
4600                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4601                         template->hmac_name);
4602                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4603                         template->hmac_driver_name);
4604        } else {
4605                snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4606                         template->name);
4607                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4608                         template->driver_name);
4609                t_alg->ahash_alg.setkey = NULL;
4610        }
4611        alg->cra_module = THIS_MODULE;
4612        alg->cra_init = caam_hash_cra_init;
4613        alg->cra_exit = caam_hash_cra_exit;
4614        alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4615        alg->cra_priority = CAAM_CRA_PRIORITY;
4616        alg->cra_blocksize = template->blocksize;
4617        alg->cra_alignmask = 0;
4618        alg->cra_flags = CRYPTO_ALG_ASYNC;
4619
4620        t_alg->alg_type = template->alg_type;
4621        t_alg->dev = dev;
4622
4623        return t_alg;
4624}
4625
4626static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4627{
4628        struct dpaa2_caam_priv_per_cpu *ppriv;
4629
4630        ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4631        napi_schedule_irqoff(&ppriv->napi);
4632}
4633
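    /*
     * Register a FQDAN notification callback and a dequeue store on the
     * affine DPIO of every core owning a queue pair. -EPROBE_DEFER is
     * returned when no affine DPIO is found, since the DPIO objects may
     * simply not have been probed yet.
     */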
4634static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4635{
4636        struct device *dev = priv->dev;
4637        struct dpaa2_io_notification_ctx *nctx;
4638        struct dpaa2_caam_priv_per_cpu *ppriv;
4639        int err, i = 0, cpu;
4640
4641        for_each_online_cpu(cpu) {
4642                ppriv = per_cpu_ptr(priv->ppriv, cpu);
4643                ppriv->priv = priv;
4644                nctx = &ppriv->nctx;
4645                nctx->is_cdan = 0;
4646                nctx->id = ppriv->rsp_fqid;
4647                nctx->desired_cpu = cpu;
4648                nctx->cb = dpaa2_caam_fqdan_cb;
4649
4650                /* Register notification callbacks */
4651                ppriv->dpio = dpaa2_io_service_select(cpu);
4652                err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4653                if (unlikely(err)) {
4654                        dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4655                        nctx->cb = NULL;
4656                        /*
4657                         * If there is no affine DPIO for this core, there
4658                         * is probably none available for the remaining
4659                         * cores either. Signal that we want to retry later,
4660                         * in case the DPIO devices haven't been probed yet.
4661                         */
4662                        err = -EPROBE_DEFER;
4663                        goto err;
4664                }
4665
4666                ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4667                                                     dev);
4668                if (unlikely(!ppriv->store)) {
4669                        dev_err(dev, "dpaa2_io_store_create() failed\n");
4670                        err = -ENOMEM;
4671                        goto err;
4672                }
4673
4674                if (++i == priv->num_pairs)
4675                        break;
4676        }
4677
4678        return 0;
4679
4680err:
4681        for_each_online_cpu(cpu) {
4682                ppriv = per_cpu_ptr(priv->ppriv, cpu);
4683                if (!ppriv->nctx.cb)
4684                        break;
4685                dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4686        }
4687
4688        for_each_online_cpu(cpu) {
4689                ppriv = per_cpu_ptr(priv->ppriv, cpu);
4690                if (!ppriv->store)
4691                        break;
4692                dpaa2_io_store_destroy(ppriv->store);
4693        }
4694
4695        return err;
4696}
4697
4698static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4699{
4700        struct dpaa2_caam_priv_per_cpu *ppriv;
4701        int i = 0, cpu;
4702
4703        for_each_online_cpu(cpu) {
4704                ppriv = per_cpu_ptr(priv->ppriv, cpu);
4705                dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4706                                            priv->dev);
4707                dpaa2_io_store_destroy(ppriv->store);
4708
4709                if (++i == priv->num_pairs)
4710                        return;
4711        }
4712}
4713
4714static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4715{
4716        struct dpseci_rx_queue_cfg rx_queue_cfg;
4717        struct device *dev = priv->dev;
4718        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4719        struct dpaa2_caam_priv_per_cpu *ppriv;
4720        int err = 0, i = 0, cpu;
4721
4722        /* Configure Rx queues */
4723        for_each_online_cpu(cpu) {
4724                ppriv = per_cpu_ptr(priv->ppriv, cpu);
4725
4726                rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4727                                       DPSECI_QUEUE_OPT_USER_CTX;
4728                rx_queue_cfg.order_preservation_en = 0;
4729                rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4730                rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4731                /*
4732                 * Rx priority (WQ) doesn't really matter, since we use
4733                 * pull mode, i.e. volatile dequeues from specific FQs
4734                 */
4735                rx_queue_cfg.dest_cfg.priority = 0;
4736                rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4737
4738                err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4739                                          &rx_queue_cfg);
4740                if (err) {
4741                        dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4742                                err);
4743                        return err;
4744                }
4745
4746                if (++i == priv->num_pairs)
4747                        break;
4748        }
4749
4750        return err;
4751}
4752
4753static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4754{
4755        struct device *dev = priv->dev;
4756
4757        if (!priv->cscn_mem)
4758                return;
4759
4760        dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4761        kfree(priv->cscn_mem);
4762}
4763
4764static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4765{
4766        struct device *dev = priv->dev;
4767        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4768
4769        dpaa2_dpseci_congestion_free(priv);
4770        dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4771}
4772
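    /*
     * Response path: recover the caam_request from FD[ADDR], unmap the
     * frame list and hand the status carried in FD[FRC] to the original
     * requester's callback.
     */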
4773static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4774                                  const struct dpaa2_fd *fd)
4775{
4776        struct caam_request *req;
4777        u32 fd_err;
4778
4779        if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4780                dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4781                return;
4782        }
4783
4784        fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4785        if (unlikely(fd_err))
4786                dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4787
4788        /*
4789         * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4790         * in FD[ERR] or FD[FRC].
4791         */
4792        req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4793        dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4794                         DMA_BIDIRECTIONAL);
4795        req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4796}
4797
4798static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4799{
4800        int err;
4801
4802        /* Retry while portal is busy */
4803        do {
4804                err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4805                                               ppriv->store);
4806        } while (err == -EBUSY);
4807
4808        if (unlikely(err))
4809                dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4810
4811        return err;
4812}
4813
4814static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4815{
4816        struct dpaa2_dq *dq;
4817        int cleaned = 0, is_last;
4818
4819        do {
4820                dq = dpaa2_io_store_next(ppriv->store, &is_last);
4821                if (unlikely(!dq)) {
4822                        if (unlikely(!is_last)) {
4823                                dev_dbg(ppriv->priv->dev,
4824                                        "FQ %d returned no valid frames\n",
4825                                        ppriv->rsp_fqid);
4826                                /*
4827                                 * MUST retry until we get some sort of
4828                                 * valid response token (be it "empty dequeue"
4829                                 * or a valid frame).
4830                                 */
4831                                continue;
4832                        }
4833                        break;
4834                }
4835
4836                /* Process FD */
4837                dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4838                cleaned++;
4839        } while (!is_last);
4840
4841        return cleaned;
4842}
4843
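    /*
     * NAPI poll: volatile-dequeue frames from the response FQ into the
     * per-cpu store and process them; the FQDAN notification is rearmed
     * only when processing completes under budget.
     */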
4844static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4845{
4846        struct dpaa2_caam_priv_per_cpu *ppriv;
4847        struct dpaa2_caam_priv *priv;
4848        int err, cleaned = 0, store_cleaned;
4849
4850        ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4851        priv = ppriv->priv;
4852
4853        if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4854                return 0;
4855
4856        do {
4857                store_cleaned = dpaa2_caam_store_consume(ppriv);
4858                cleaned += store_cleaned;
4859
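                    /*
                     * Stop when the store came back empty, or when one more
                     * full store (up to DPAA2_CAAM_STORE_SIZE frames) could
                     * push us past the NAPI budget.
                     */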
4860                if (store_cleaned == 0 ||
4861                    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4862                        break;
4863
4864                /* Try to dequeue some more */
4865                err = dpaa2_caam_pull_fq(ppriv);
4866                if (unlikely(err))
4867                        break;
4868        } while (1);
4869
4870        if (cleaned < budget) {
4871                napi_complete_done(napi, cleaned);
4872                err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4873                if (unlikely(err))
4874                        dev_err(priv->dev, "Notification rearm failed: %d\n",
4875                                err);
4876        }
4877
4878        return cleaned;
4879}
4880
4881static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4882                                         u16 token)
4883{
4884        struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4885        struct device *dev = priv->dev;
4886        int err;
4887
4888        /*
4889         * The congestion group feature is supported starting with DPSECI
4890         * API v5.1, and only when the object was created with this capability.
4891         */
4892        if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4893            !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4894                return 0;
4895
4896        priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4897                                 GFP_KERNEL | GFP_DMA);
4898        if (!priv->cscn_mem)
4899                return -ENOMEM;
4900
4901        priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4902        priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4903                                        DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4904        if (dma_mapping_error(dev, priv->cscn_dma)) {
4905                dev_err(dev, "Error mapping CSCN memory area\n");
4906                err = -ENOMEM;
4907                goto err_dma_map;
4908        }
4909
4910        cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4911        cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4912        cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4913        cong_notif_cfg.message_ctx = (uintptr_t)priv;
4914        cong_notif_cfg.message_iova = priv->cscn_dma;
4915        cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4916                                        DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4917                                        DPSECI_CGN_MODE_COHERENT_WRITE;
4918
4919        err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4920                                                 &cong_notif_cfg);
4921        if (err) {
4922                dev_err(dev, "dpseci_set_congestion_notification failed\n");
4923                goto err_set_cong;
4924        }
4925
4926        return 0;
4927
4928err_set_cong:
4929        dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4930err_dma_map:
4931        kfree(priv->cscn_mem);
4932
4933        return err;
4934}
4935
4936static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4937{
4938        struct device *dev = &ls_dev->dev;
4939        struct dpaa2_caam_priv *priv;
4940        struct dpaa2_caam_priv_per_cpu *ppriv;
4941        int err, cpu;
4942        u8 i;
4943
4944        priv = dev_get_drvdata(dev);
4945
4946        priv->dev = dev;
4947        priv->dpsec_id = ls_dev->obj_desc.id;
4948
4949        /* Get a handle for the DPSECI this interface is associated with */
4950        err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4951        if (err) {
4952                dev_err(dev, "dpseci_open() failed: %d\n", err);
4953                goto err_open;
4954        }
4955
4956        err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4957                                     &priv->minor_ver);
4958        if (err) {
4959                dev_err(dev, "dpseci_get_api_version() failed\n");
4960                goto err_get_vers;
4961        }
4962
4963        dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
4964
4965        err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
4966                                    &priv->dpseci_attr);
4967        if (err) {
4968                dev_err(dev, "dpseci_get_attributes() failed\n");
4969                goto err_get_vers;
4970        }
4971
4972        err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
4973                                  &priv->sec_attr);
4974        if (err) {
4975                dev_err(dev, "dpseci_get_sec_attr() failed\n");
4976                goto err_get_vers;
4977        }
4978
4979        err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
4980        if (err) {
4981                dev_err(dev, "setup_congestion() failed\n");
4982                goto err_get_vers;
4983        }
4984
4985        priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
4986                              priv->dpseci_attr.num_tx_queues);
4987        if (priv->num_pairs > num_online_cpus()) {
4988                dev_warn(dev, "%d queues won't be used\n",
4989                         priv->num_pairs - num_online_cpus());
4990                priv->num_pairs = num_online_cpus();
4991        }
4992
4993        for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
4994                err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4995                                          &priv->rx_queue_attr[i]);
4996                if (err) {
4997                        dev_err(dev, "dpseci_get_rx_queue() failed\n");
4998                        goto err_get_rx_queue;
4999                }
5000        }
5001
5002        for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5003                err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5004                                          &priv->tx_queue_attr[i]);
5005                if (err) {
5006                        dev_err(dev, "dpseci_get_tx_queue() failed\n");
5007                        goto err_get_rx_queue;
5008                }
5009        }
5010
5011        i = 0;
5012        for_each_online_cpu(cpu) {
5013                u8 j;
5014
5015                j = i % priv->num_pairs;
5016
5017                ppriv = per_cpu_ptr(priv->ppriv, cpu);
5018                ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5019
5020                /*
5021                 * Allow all cores to enqueue, while only some of them
5022                 * will take part in dequeuing.
5023                 */
5024                if (++i > priv->num_pairs)
5025                        continue;
5026
5027                ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5028                ppriv->prio = j;
5029
5030                dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5031                        priv->rx_queue_attr[j].fqid,
5032                        priv->tx_queue_attr[j].fqid);
5033
5034                ppriv->net_dev.dev = *dev;
5035                INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5036                netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
5037                               DPAA2_CAAM_NAPI_WEIGHT);
5038        }
5039
5040        return 0;
5041
5042err_get_rx_queue:
5043        dpaa2_dpseci_congestion_free(priv);
5044err_get_vers:
5045        dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5046err_open:
5047        return err;
5048}
5049
5050static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5051{
5052        struct device *dev = priv->dev;
5053        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5054        struct dpaa2_caam_priv_per_cpu *ppriv;
5055        int i;
5056
5057        for (i = 0; i < priv->num_pairs; i++) {
5058                ppriv = per_cpu_ptr(priv->ppriv, i);
5059                napi_enable(&ppriv->napi);
5060        }
5061
5062        return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5063}
5064
5065static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5066{
5067        struct device *dev = priv->dev;
5068        struct dpaa2_caam_priv_per_cpu *ppriv;
5069        struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5070        int i, err = 0, enabled;
5071
5072        err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5073        if (err) {
5074                dev_err(dev, "dpseci_disable() failed\n");
5075                return err;
5076        }
5077
5078        err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5079        if (err) {
5080                dev_err(dev, "dpseci_is_enabled() failed\n");
5081                return err;
5082        }
5083
5084        dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5085
5086        for (i = 0; i < priv->num_pairs; i++) {
5087                ppriv = per_cpu_ptr(priv->ppriv, i);
5088                napi_disable(&ppriv->napi);
5089                netif_napi_del(&ppriv->napi);
5090        }
5091
5092        return 0;
5093}
5094
5095static struct list_head hash_list;
5096
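    /*
     * Probe sequence: allocate per-device state and the buffer cache, set
     * the DMA mask, obtain an MC portal, configure the DPSECI object and
     * its DPIO bindings, enable it, and finally register only those
     * algorithms the SEC instance can actually back.
     */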
5097static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5098{
5099        struct device *dev;
5100        struct dpaa2_caam_priv *priv;
5101        int i, err = 0;
5102        bool registered = false;
5103
5104        /*
5105         * There is no way to query CAAM endianness: there is no direct
5106         * register space access, and MC f/w does not provide this
5107         * attribute. All DPAA2-based SoCs have a little-endian CAAM, so
5108         * hard-code this property.
5109         */
5110        caam_little_end = true;
5111
5112        caam_imx = false;
5113
5114        dev = &dpseci_dev->dev;
5115
5116        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5117        if (!priv)
5118                return -ENOMEM;
5119
5120        dev_set_drvdata(dev, priv);
5121
5122        priv->domain = iommu_get_domain_for_dev(dev);
5123
5124        qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5125                                     0, SLAB_CACHE_DMA, NULL);
5126        if (!qi_cache) {
5127                dev_err(dev, "Can't allocate SEC cache\n");
5128                return -ENOMEM;
5129        }
5130
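            /*
             * The DPAA2 hardware can address at most 49 bits, hence the
             * DMA mask below.
             */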
        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
        if (err) {
                dev_err(dev, "dma_set_mask_and_coherent() failed\n");
                goto err_dma_mask;
        }

        /* Obtain an MC portal */
        err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "MC portal allocation failed\n");

                goto err_dma_mask;
        }

        priv->ppriv = alloc_percpu(*priv->ppriv);
        if (!priv->ppriv) {
                dev_err(dev, "alloc_percpu() failed\n");
                err = -ENOMEM;
                goto err_alloc_ppriv;
        }

        /* DPSECI initialization */
        err = dpaa2_dpseci_setup(dpseci_dev);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_setup() failed\n");
                goto err_dpseci_setup;
        }

        /* DPIO */
        err = dpaa2_dpseci_dpio_setup(priv);
        if (err) {
                if (err != -EPROBE_DEFER)
                        dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
                goto err_dpio_setup;
        }

        /* DPSECI binding to DPIO */
        err = dpaa2_dpseci_bind(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_bind() failed\n");
                goto err_bind;
        }

        /* DPSECI enable */
        err = dpaa2_dpseci_enable(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpseci_enable() failed\n");
                goto err_bind;
        }

        dpaa2_dpseci_debugfs_init(priv);

        /* register crypto algorithms the device supports */
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_skcipher_alg *t_alg = driver_algs + i;
                u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!priv->sec_attr.des_acc_num &&
                    (alg_sel == OP_ALG_ALGSEL_3DES ||
                     alg_sel == OP_ALG_ALGSEL_DES))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!priv->sec_attr.aes_acc_num &&
                    alg_sel == OP_ALG_ALGSEL_AES)
                        continue;

                /* Skip CHACHA20 algorithms if not supported by device */
                if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
                    !priv->sec_attr.ccha_acc_num)
                        continue;

                t_alg->caam.dev = dev;
                caam_skcipher_alg_init(t_alg);

                err = crypto_register_skcipher(&t_alg->skcipher);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->skcipher.base.cra_driver_name, err);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }
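        /*
         * For reference, the alg_sel masking above isolates the algorithm
         * selector field (bits 16-23) of the class 1 operation type. A
         * worked example with the constants from desc.h:
         *
         *      class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC
         *                      = (0x10 << 16) | (0x10 << 4) = 0x00100100
         *      alg_sel = 0x00100100 & OP_ALG_ALGSEL_MASK (0x00ff0000)
         *              = 0x00100000 = OP_ALG_ALGSEL_AES
         */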

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;
                u32 c1_alg_sel = t_alg->caam.class1_alg_type &
                                 OP_ALG_ALGSEL_MASK;
                u32 c2_alg_sel = t_alg->caam.class2_alg_type &
                                 OP_ALG_ALGSEL_MASK;

                /* Skip DES algorithms if not supported by device */
                if (!priv->sec_attr.des_acc_num &&
                    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
                     c1_alg_sel == OP_ALG_ALGSEL_DES))
                        continue;

                /* Skip AES algorithms if not supported by device */
                if (!priv->sec_attr.aes_acc_num &&
                    c1_alg_sel == OP_ALG_ALGSEL_AES)
                        continue;

                /* Skip CHACHA20 algorithms if not supported by device */
                if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
                    !priv->sec_attr.ccha_acc_num)
                        continue;

                /* Skip POLY1305 algorithms if not supported by device */
                if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
                    !priv->sec_attr.ptha_acc_num)
                        continue;

                /*
                 * Skip algorithms requiring message digests
                 * if MD not supported by device. Note that the MDHA
                 * family selector (0x40) must be compared in its
                 * shifted position within the ALGSEL field; c2_alg_sel
                 * only has bits 16-23 set, so comparing against a raw
                 * 0x40 could never match.
                 */
                if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) ==
                    (0x40 << OP_ALG_ALGSEL_SHIFT) &&
                    !priv->sec_attr.md_acc_num)
                        continue;

                t_alg->caam.dev = dev;
                caam_aead_alg_init(t_alg);

                err = crypto_register_aead(&t_alg->aead);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->aead.base.cra_driver_name, err);
                        continue;
                }

                t_alg->registered = true;
                registered = true;
        }
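        /*
         * The message-digest check above works because all MDHA selectors
         * share the 0x4 upper nibble; with the constants from desc.h:
         *
         *      OP_ALG_ALGSEL_SHA1 = 0x41 << 16 = 0x00410000
         *      0x00410000 & ~OP_ALG_ALGSEL_SUBMASK (~0x000f0000)
         *              = 0x00400000 = 0x40 << OP_ALG_ALGSEL_SHIFT
         *
         * i.e. the 0x40 family selector compared in its shifted position.
         */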
        if (registered)
                dev_info(dev, "algorithms registered in /proc/crypto\n");

        /* register hash algorithms the device supports */
        INIT_LIST_HEAD(&hash_list);

        /*
         * Skip registration of any hashing algorithms if MD block
         * is not present.
         */
        if (!priv->sec_attr.md_acc_num)
                return 0;

        for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
                struct caam_hash_alg *t_alg;
                struct caam_hash_template *alg = driver_hash + i;

                /* register hmac version */
                t_alg = caam_hash_alloc(dev, alg, true);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(dev, "%s hash alg allocation failed: %d\n",
                                 alg->driver_name, err);
                        continue;
                }

                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->ahash_alg.halg.base.cra_driver_name,
                                 err);
                        kfree(t_alg);
                } else {
                        list_add_tail(&t_alg->entry, &hash_list);
                }

                /* register unkeyed version */
                t_alg = caam_hash_alloc(dev, alg, false);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
                        dev_warn(dev, "%s hash alg allocation failed: %d\n",
                                 alg->driver_name, err);
                        continue;
                }

                err = crypto_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        dev_warn(dev, "%s alg registration failed: %d\n",
                                 t_alg->ahash_alg.halg.base.cra_driver_name,
                                 err);
                        kfree(t_alg);
                } else {
                        list_add_tail(&t_alg->entry, &hash_list);
                }
        }
        if (!list_empty(&hash_list))
                dev_info(dev, "hash algorithms registered in /proc/crypto\n");

        /*
         * Individual algorithm failures are only warned about above, so
         * don't let a stale error code from the last loop iteration fail
         * the whole probe.
         */
        return 0;

err_bind:
        dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
        dpaa2_dpseci_free(priv);
err_dpseci_setup:
        free_percpu(priv->ppriv);
err_alloc_ppriv:
        fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
        kmem_cache_destroy(qi_cache);

        return err;
}

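/*
 * Undo everything probe() set up, in reverse order: unregister the crypto
 * algorithms first so no new requests come in, then disable and free the
 * DPSECI object, DPIO resources and MC portal and, last, the memory cache.
 */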
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
        struct device *dev;
        struct dpaa2_caam_priv *priv;
        int i;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);

        dpaa2_dpseci_debugfs_exit(priv);

        for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
                struct caam_aead_alg *t_alg = driver_aeads + i;

                if (t_alg->registered)
                        crypto_unregister_aead(&t_alg->aead);
        }

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_skcipher_alg *t_alg = driver_algs + i;

                if (t_alg->registered)
                        crypto_unregister_skcipher(&t_alg->skcipher);
        }

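        /*
         * hash_list has static storage duration, so ->next is NULL unless
         * probe() got far enough to run INIT_LIST_HEAD() on it.
         */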
        if (hash_list.next) {
                struct caam_hash_alg *t_hash_alg, *p;

                list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
                        crypto_unregister_ahash(&t_hash_alg->ahash_alg);
                        list_del(&t_hash_alg->entry);
                        kfree(t_hash_alg);
                }
        }

        dpaa2_dpseci_disable(priv);
        dpaa2_dpseci_dpio_free(priv);
        dpaa2_dpseci_free(priv);
        free_percpu(priv->ppriv);
        fsl_mc_portal_free(priv->mc_io);
        kmem_cache_destroy(qi_cache);

        return 0;
}

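/**
 * dpaa2_caam_enqueue - enqueue a crypto request to the DPSECI object
 * @dev: device of the DPSECI object the request goes to
 * @req: request previously prepared by the caller (frame list entries,
 *       flow context)
 *
 * Return: -EINPROGRESS if the frame was enqueued, -EBUSY if the device is
 * congested, -EIO if the frame list table could not be mapped or all
 * enqueue attempts failed.
 */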
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
        struct dpaa2_fd fd;
        struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
        struct dpaa2_caam_priv_per_cpu *ppriv;
        int err = 0, i;

        if (IS_ERR(req))
                return PTR_ERR(req);

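        /*
         * If a Congestion State Change Notification (CSCN) area was set up
         * for the DPSECI object, check it and drop the request while the
         * congestion group is over its threshold.
         */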
        if (priv->cscn_mem) {
                dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
                                        DPAA2_CSCN_SIZE,
                                        DMA_FROM_DEVICE);
                if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
                        dev_dbg_ratelimited(dev, "Dropping request\n");
                        return -EBUSY;
                }
        }

        dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

        req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, req->fd_flt_dma)) {
                dev_err(dev, "DMA mapping error for QI enqueue request\n");
                /*
                 * Nothing was mapped, so don't fall through to err_out,
                 * which would unmap an invalid DMA handle.
                 */
                return -EIO;
        }

        memset(&fd, 0, sizeof(fd));
        dpaa2_fd_set_format(&fd, dpaa2_fd_list);
        dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
        dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
        dpaa2_fd_set_flc(&fd, req->flc_dma);

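        /*
         * Attempt the enqueue a bounded number of times (twice the number
         * of Tx queues), backing off with cpu_relax() while the portal
         * reports -EBUSY.
         */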
        ppriv = this_cpu_ptr(priv->ppriv);
        for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
                err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
                                                  &fd);
                if (err != -EBUSY)
                        break;

                cpu_relax();
        }

        if (unlikely(err)) {
                dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
                goto err_out;
        }

        return -EINPROGRESS;

err_out:
        dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
                         DMA_BIDIRECTIONAL);
        return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
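
/*
 * A minimal usage sketch for dpaa2_caam_enqueue(), mirroring the pattern
 * of the skcipher/aead entry points earlier in this file; 'edesc',
 * 'caam_req' and 'encrypt_done' are illustrative names, not part of the
 * API:
 *
 *      caam_req->flc = &ctx->flc[ENCRYPT];
 *      caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
 *      caam_req->cbk = encrypt_done;
 *      caam_req->ctx = &req->base;
 *      caam_req->edesc = edesc;
 *
 *      ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
 *      if (ret != -EINPROGRESS &&
 *          !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 *              skcipher_unmap(ctx->dev, edesc, req);
 *              qi_cache_free(edesc);
 *      }
 */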

static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpseci",
        },
        { .vendor = 0x0 }
};
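/*
 * The fsl-mc bus matches devices against the table above by object type,
 * so this driver binds to every DPSECI object exported by the MC firmware.
 * Note that automatic module loading would additionally require a
 * MODULE_DEVICE_TABLE(fsl_mc, dpaa2_caam_match_id_table) entry, as other
 * fsl-mc drivers provide.
 */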

static struct fsl_mc_driver dpaa2_caam_driver = {
        .driver = {
                .name           = KBUILD_MODNAME,
                .owner          = THIS_MODULE,
        },
        .probe          = dpaa2_caam_probe,
        .remove         = dpaa2_caam_remove,
        .match_id_table = dpaa2_caam_match_id_table
};

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);
