linux/drivers/crypto/ccree/cc_buffer_mgr.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16enum dma_buffer_type {
  17        DMA_NULL_TYPE = -1,
  18        DMA_SGL_TYPE = 1,
  19        DMA_BUFF_TYPE = 2,
  20};
  21
  22struct buff_mgr_handle {
  23        struct dma_pool *mlli_buffs_pool;
  24};
  25
  26union buffer_array_entry {
  27        struct scatterlist *sgl;
  28        dma_addr_t buffer_dma;
  29};
  30
  31struct buffer_array {
  32        unsigned int num_of_buffers;
  33        union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  34        unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  35        int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  36        int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  37        enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
  38        bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  39        u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  40};
  41
  42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  43{
  44        switch (type) {
  45        case CC_DMA_BUF_NULL:
  46                return "BUF_NULL";
  47        case CC_DMA_BUF_DLLI:
  48                return "BUF_DLLI";
  49        case CC_DMA_BUF_MLLI:
  50                return "BUF_MLLI";
  51        default:
  52                return "BUF_INVALID";
  53        }
  54}
  55
  56/**
  57 * cc_copy_mac() - Copy MAC to temporary location
  58 *
  59 * @dev: device object
  60 * @req: aead request object
  61 * @dir: [IN] copy from/to sgl
  62 */
  63static void cc_copy_mac(struct device *dev, struct aead_request *req,
  64                        enum cc_sg_cpy_direct dir)
  65{
  66        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  67        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  68        u32 skip = req->assoclen + req->cryptlen;
  69
  70        if (areq_ctx->is_gcm4543)
  71                skip += crypto_aead_ivsize(tfm);
  72
  73        cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  74                           (skip - areq_ctx->req_authsize), skip, dir);
  75}
  76
   77/**
   78 * cc_get_sgl_nents() - Get scatterlist number of entries.
   79 * @dev: Device object
   80 * @sg_list: SG list
   81 * @nbytes: [IN] Total SGL data bytes.
   82 * @lbytes: [OUT] Returns the number of bytes in the last entry
   83 */
  84static unsigned int cc_get_sgl_nents(struct device *dev,
  85                                     struct scatterlist *sg_list,
  86                                     unsigned int nbytes, u32 *lbytes,
  87                                     bool *is_chained)
  88{
  89        unsigned int nents = 0;
  90
  91        while (nbytes && sg_list) {
  92                if (sg_list->length) {
  93                        nents++;
  94                        /* get the number of bytes in the last entry */
  95                        *lbytes = nbytes;
  96                        nbytes -= (sg_list->length > nbytes) ?
  97                                        nbytes : sg_list->length;
  98                        sg_list = sg_next(sg_list);
  99                } else {
 100                        sg_list = (struct scatterlist *)sg_page(sg_list);
 101                        if (is_chained)
 102                                *is_chained = true;
 103                }
 104        }
 105        dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 106        return nents;
 107}
 108
  109/**
  110 * cc_zero_sgl() - Zero scatter list data.
  111 * @sgl: SG list to zero
  112 * @data_len: Number of bytes to zero, starting from the head of the list
  113 */
 114void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 115{
 116        struct scatterlist *current_sg = sgl;
 117        int sg_index = 0;
 118
 119        while (sg_index <= data_len) {
 120                if (!current_sg) {
 121                        /* reached the end of the sgl --> just return back */
 122                        return;
 123                }
 124                memset(sg_virt(current_sg), 0, current_sg->length);
 125                sg_index += current_sg->length;
 126                current_sg = sg_next(current_sg);
 127        }
 128}
 129
  130/**
  131 * cc_copy_sg_portion() - Copy scatter list data from to_skip to end
  132 * into dest, or the other way around, depending on the direction.
  133 * @dev: Device object
  134 * @dest: Linear buffer to copy to/from
  135 * @sg: SG list to copy from/to
  136 * @to_skip: Byte offset in the SG list at which the copy starts
  137 * @end: Byte offset in the SG list at which the copy ends
  138 * @direct: CC_SG_TO_BUF copies SG data into dest, CC_SG_FROM_BUF the reverse
  139 */
 140void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 141                        u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 142{
 143        u32 nents, lbytes;
 144
 145        nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
 146        sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 147                       (direct == CC_SG_TO_BUF));
 148}
 149
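     /*
      * cc_render_buff_to_mlli() - Render a contiguous DMA buffer into MLLI
      * entries, splitting buffers larger than CC_MAX_MLLI_ENTRY_SIZE across
      * multiple entries. Advances *mlli_entry_pp and *curr_nents, and fails
      * with -ENOMEM if the table would exceed MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
      */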
 150static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 151                                  u32 buff_size, u32 *curr_nents,
 152                                  u32 **mlli_entry_pp)
 153{
 154        u32 *mlli_entry_p = *mlli_entry_pp;
 155        u32 new_nents;
 156
  157        /* Verify there is no memory overflow */
 158        new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 159        if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
 160                return -ENOMEM;
 161
  162        /* Handle buffers longer than 64 KB */
 163        while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 164                cc_lli_set_addr(mlli_entry_p, buff_dma);
 165                cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 166                dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 167                        *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 168                        mlli_entry_p[LLI_WORD1_OFFSET]);
 169                buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 170                buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 171                mlli_entry_p = mlli_entry_p + 2;
 172                (*curr_nents)++;
 173        }
  174        /* Last entry */
 175        cc_lli_set_addr(mlli_entry_p, buff_dma);
 176        cc_lli_set_size(mlli_entry_p, buff_size);
 177        dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 178                *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 179                mlli_entry_p[LLI_WORD1_OFFSET]);
 180        mlli_entry_p = mlli_entry_p + 2;
 181        *mlli_entry_pp = mlli_entry_p;
 182        (*curr_nents)++;
 183        return 0;
 184}
 185
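     /*
      * cc_render_sg_to_mlli() - Render a DMA-mapped scatterlist into MLLI
      * entries, feeding each SG entry (starting at sgl_offset into the first
      * one) to cc_render_buff_to_mlli() until sgl_data_len is consumed.
      */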
 186static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 187                                u32 sgl_data_len, u32 sgl_offset,
 188                                u32 *curr_nents, u32 **mlli_entry_pp)
 189{
 190        struct scatterlist *curr_sgl = sgl;
 191        u32 *mlli_entry_p = *mlli_entry_pp;
 192        s32 rc = 0;
 193
 194        for ( ; (curr_sgl && sgl_data_len);
 195              curr_sgl = sg_next(curr_sgl)) {
 196                u32 entry_data_len =
 197                        (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 198                                sg_dma_len(curr_sgl) - sgl_offset :
 199                                sgl_data_len;
 200                sgl_data_len -= entry_data_len;
 201                rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 202                                            sgl_offset, entry_data_len,
 203                                            curr_nents, &mlli_entry_p);
 204                if (rc)
 205                        return rc;
 206
 207                sgl_offset = 0;
 208        }
 209        *mlli_entry_pp = mlli_entry_p;
 210        return 0;
 211}
 212
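     /*
      * cc_generate_mlli() - Allocate an MLLI table from the current DMA pool
      * and fill it with entries for all buffers gathered in sg_data. The
      * per-buffer mlli_nents counters are updated with the number of entries
      * generated, and mlli_params receives the table address and length.
      */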
 213static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 214                            struct mlli_params *mlli_params, gfp_t flags)
 215{
 216        u32 *mlli_p;
 217        u32 total_nents = 0, prev_total_nents = 0;
 218        int rc = 0, i;
 219
 220        dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 221
 222        /* Allocate memory from the pointed pool */
 223        mlli_params->mlli_virt_addr =
 224                dma_pool_alloc(mlli_params->curr_pool, flags,
 225                               &mlli_params->mlli_dma_addr);
 226        if (!mlli_params->mlli_virt_addr) {
 227                dev_err(dev, "dma_pool_alloc() failed\n");
 228                rc = -ENOMEM;
 229                goto build_mlli_exit;
 230        }
 231        /* Point to start of MLLI */
 232        mlli_p = (u32 *)mlli_params->mlli_virt_addr;
  233        /* Go over all SG entries and link them into one MLLI table */
 234        for (i = 0; i < sg_data->num_of_buffers; i++) {
 235                union buffer_array_entry *entry = &sg_data->entry[i];
 236                u32 tot_len = sg_data->total_data_len[i];
 237                u32 offset = sg_data->offset[i];
 238
 239                if (sg_data->type[i] == DMA_SGL_TYPE)
 240                        rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
 241                                                  offset, &total_nents,
 242                                                  &mlli_p);
 243                else /*DMA_BUFF_TYPE*/
 244                        rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
 245                                                    tot_len, &total_nents,
 246                                                    &mlli_p);
 247                if (rc)
 248                        return rc;
 249
 250                /* set last bit in the current table */
 251                if (sg_data->mlli_nents[i]) {
  252                        /* Calculate the current MLLI table length for the
  253                         * length field in the descriptor
  254                         */
 255                        *sg_data->mlli_nents[i] +=
 256                                (total_nents - prev_total_nents);
 257                        prev_total_nents = total_nents;
 258                }
 259        }
 260
 261        /* Set MLLI size for the bypass operation */
 262        mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 263
 264        dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 265                mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 266                mlli_params->mlli_len);
 267
 268build_mlli_exit:
 269        return rc;
 270}
 271
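     /*
      * cc_add_buffer_entry() - Queue a single contiguous DMA buffer in the
      * buffer_array for later rendering into an MLLI table.
      */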
 272static void cc_add_buffer_entry(struct device *dev,
 273                                struct buffer_array *sgl_data,
 274                                dma_addr_t buffer_dma, unsigned int buffer_len,
 275                                bool is_last_entry, u32 *mlli_nents)
 276{
 277        unsigned int index = sgl_data->num_of_buffers;
 278
 279        dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
 280                index, &buffer_dma, buffer_len, is_last_entry);
 281        sgl_data->nents[index] = 1;
 282        sgl_data->entry[index].buffer_dma = buffer_dma;
 283        sgl_data->offset[index] = 0;
 284        sgl_data->total_data_len[index] = buffer_len;
 285        sgl_data->type[index] = DMA_BUFF_TYPE;
 286        sgl_data->is_last[index] = is_last_entry;
 287        sgl_data->mlli_nents[index] = mlli_nents;
 288        if (sgl_data->mlli_nents[index])
 289                *sgl_data->mlli_nents[index] = 0;
 290        sgl_data->num_of_buffers++;
 291}
 292
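     /*
      * cc_add_sg_entry() - Queue an already DMA-mapped scatterlist segment in
      * the buffer_array for later rendering into an MLLI table.
      */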
 293static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 294                            unsigned int nents, struct scatterlist *sgl,
 295                            unsigned int data_len, unsigned int data_offset,
 296                            bool is_last_table, u32 *mlli_nents)
 297{
 298        unsigned int index = sgl_data->num_of_buffers;
 299
 300        dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 301                index, nents, sgl, data_len, is_last_table);
 302        sgl_data->nents[index] = nents;
 303        sgl_data->entry[index].sgl = sgl;
 304        sgl_data->offset[index] = data_offset;
 305        sgl_data->total_data_len[index] = data_len;
 306        sgl_data->type[index] = DMA_SGL_TYPE;
 307        sgl_data->is_last[index] = is_last_table;
 308        sgl_data->mlli_nents[index] = mlli_nents;
 309        if (sgl_data->mlli_nents[index])
 310                *sgl_data->mlli_nents[index] = 0;
 311        sgl_data->num_of_buffers++;
 312}
 313
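     /*
      * cc_dma_map_sg() - DMA-map a (possibly chained) scatterlist one entry at
      * a time. Returns nents on success, or 0 after unmapping the entries that
      * were already mapped.
      */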
 314static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
 315                         enum dma_data_direction direction)
 316{
 317        u32 i, j;
 318        struct scatterlist *l_sg = sg;
 319
 320        for (i = 0; i < nents; i++) {
 321                if (!l_sg)
 322                        break;
 323                if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
  324                        dev_err(dev, "dma_map_sg() sg buffer failed\n");
 325                        goto err;
 326                }
 327                l_sg = sg_next(l_sg);
 328        }
 329        return nents;
 330
 331err:
  332        /* Unmap the entries that were already mapped */
 333        for (j = 0; j < i; j++) {
 334                if (!sg)
 335                        break;
 336                dma_unmap_sg(dev, sg, 1, direction);
 337                sg = sg_next(sg);
 338        }
 339        return 0;
 340}
 341
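     /*
      * cc_map_sg() - DMA-map an SG list holding nbytes of data. Reports the
      * number of SG entries through *nents, the number of mapped entries
      * through *mapped_nents and the size of the last entry through *lbytes;
      * fails if more than max_sg_nents entries are needed.
      */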
 342static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 343                     unsigned int nbytes, int direction, u32 *nents,
 344                     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 345{
 346        bool is_chained = false;
 347
 348        if (sg_is_last(sg)) {
  349                /* One entry only case - set to DLLI */
 350                if (dma_map_sg(dev, sg, 1, direction) != 1) {
 351                        dev_err(dev, "dma_map_sg() single buffer failed\n");
 352                        return -ENOMEM;
 353                }
 354                dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 355                        &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
 356                        sg->offset, sg->length);
 357                *lbytes = nbytes;
 358                *nents = 1;
 359                *mapped_nents = 1;
  360        } else {  /* !sg_is_last */
 361                *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
 362                                          &is_chained);
  363                if (*nents > max_sg_nents) {
  364                        dev_err(dev, "Too many fragments. current %d max %d\n",
  365                                *nents, max_sg_nents);
  366                        *nents = 0;
 367                        return -ENOMEM;
 368                }
 369                if (!is_chained) {
  370                        /* With an IOMMU, the number of mapped nents might
  371                         * differ from the original sgl nents
  372                         */
 373                        *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 374                        if (*mapped_nents == 0) {
 375                                *nents = 0;
 376                                dev_err(dev, "dma_map_sg() sg buffer failed\n");
 377                                return -ENOMEM;
 378                        }
 379                } else {
  380                        /* In this case the driver maps entry by entry so it
  381                         * must have the same nents before and after map
  382                         */
 383                        *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
 384                                                      direction);
 385                        if (*mapped_nents != *nents) {
 386                                *nents = *mapped_nents;
 387                                dev_err(dev, "dma_map_sg() sg buffer failed\n");
 388                                return -ENOMEM;
 389                        }
 390                }
 391        }
 392
 393        return 0;
 394}
 395
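     /*
      * cc_set_aead_conf_buf() - Map the CCM configuration (a-data) block for
      * DMA and, when there is associated data, queue it for the assoc MLLI
      * table.
      */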
 396static int
 397cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 398                     u8 *config_data, struct buffer_array *sg_data,
 399                     unsigned int assoclen)
 400{
  401        dev_dbg(dev, "handle additional data config set to DLLI\n");
 402        /* create sg for the current buffer */
 403        sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 404                    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 405        if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 406                dev_err(dev, "dma_map_sg() config buffer failed\n");
 407                return -ENOMEM;
 408        }
 409        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 410                &sg_dma_address(&areq_ctx->ccm_adata_sg),
 411                sg_page(&areq_ctx->ccm_adata_sg),
 412                sg_virt(&areq_ctx->ccm_adata_sg),
 413                areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 414        /* prepare for case of MLLI */
 415        if (assoclen > 0) {
 416                cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 417                                (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 418                                0, false, NULL);
 419        }
 420        return 0;
 421}
 422
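     /*
      * cc_set_hash_buf() - Map the hash staging buffer as a single-entry SG
      * list for DMA and queue it in case an MLLI table is needed.
      */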
 423static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 424                           u8 *curr_buff, u32 curr_buff_cnt,
 425                           struct buffer_array *sg_data)
 426{
  427        dev_dbg(dev, "handle curr buff %x set to DLLI\n", curr_buff_cnt);
 428        /* create sg for the current buffer */
 429        sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 430        if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 431                dev_err(dev, "dma_map_sg() src buffer failed\n");
 432                return -ENOMEM;
 433        }
 434        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 435                &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 436                sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 437                areq_ctx->buff_sg->length);
 438        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 439        areq_ctx->curr_sg = areq_ctx->buff_sg;
 440        areq_ctx->in_nents = 0;
 441        /* prepare for case of MLLI */
 442        cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 443                        false, NULL);
 444        return 0;
 445}
 446
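     /*
      * cc_unmap_cipher_request() - Release the DMA resources taken by
      * cc_map_cipher_request(): the IV mapping, the MLLI table (if one was
      * allocated) and the source/destination SG mappings.
      */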
 447void cc_unmap_cipher_request(struct device *dev, void *ctx,
 448                                unsigned int ivsize, struct scatterlist *src,
 449                                struct scatterlist *dst)
 450{
 451        struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 452
 453        if (req_ctx->gen_ctx.iv_dma_addr) {
 454                dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 455                        &req_ctx->gen_ctx.iv_dma_addr, ivsize);
 456                dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 457                                 ivsize, DMA_TO_DEVICE);
 458        }
 459        /* Release pool */
 460        if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 461            req_ctx->mlli_params.mlli_virt_addr) {
 462                dma_pool_free(req_ctx->mlli_params.curr_pool,
 463                              req_ctx->mlli_params.mlli_virt_addr,
 464                              req_ctx->mlli_params.mlli_dma_addr);
 465        }
 466
 467        dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 468        dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 469
 470        if (src != dst) {
 471                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 472                dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 473        }
 474}
 475
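     /*
      * cc_map_cipher_request() - DMA-map the IV, source and destination of a
      * cipher request. If any SG list maps to more than one entry, the request
      * is switched to MLLI mode and an MLLI table is generated; otherwise it
      * stays in DLLI mode.
      */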
 476int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 477                          unsigned int ivsize, unsigned int nbytes,
 478                          void *info, struct scatterlist *src,
 479                          struct scatterlist *dst, gfp_t flags)
 480{
 481        struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 482        struct mlli_params *mlli_params = &req_ctx->mlli_params;
 483        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 484        struct device *dev = drvdata_to_dev(drvdata);
 485        struct buffer_array sg_data;
 486        u32 dummy = 0;
 487        int rc = 0;
 488        u32 mapped_nents = 0;
 489
 490        req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 491        mlli_params->curr_pool = NULL;
 492        sg_data.num_of_buffers = 0;
 493
 494        /* Map IV buffer */
 495        if (ivsize) {
 496                dump_byte_array("iv", (u8 *)info, ivsize);
 497                req_ctx->gen_ctx.iv_dma_addr =
 498                        dma_map_single(dev, (void *)info,
 499                                       ivsize, DMA_TO_DEVICE);
 500                if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 501                        dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 502                                ivsize, info);
 503                        return -ENOMEM;
 504                }
 505                dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 506                        ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 507        } else {
 508                req_ctx->gen_ctx.iv_dma_addr = 0;
 509        }
 510
 511        /* Map the src SGL */
 512        rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 513                       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 514        if (rc) {
 515                rc = -ENOMEM;
 516                goto cipher_exit;
 517        }
 518        if (mapped_nents > 1)
 519                req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 520
 521        if (src == dst) {
 522                /* Handle inplace operation */
 523                if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 524                        req_ctx->out_nents = 0;
 525                        cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 526                                        nbytes, 0, true,
 527                                        &req_ctx->in_mlli_nents);
 528                }
 529        } else {
 530                /* Map the dst sg */
 531                if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 532                              &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 533                              &dummy, &mapped_nents)) {
 534                        rc = -ENOMEM;
 535                        goto cipher_exit;
 536                }
 537                if (mapped_nents > 1)
 538                        req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 539
 540                if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 541                        cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 542                                        nbytes, 0, true,
 543                                        &req_ctx->in_mlli_nents);
 544                        cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 545                                        nbytes, 0, true,
 546                                        &req_ctx->out_mlli_nents);
 547                }
 548        }
 549
 550        if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 551                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 552                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 553                if (rc)
 554                        goto cipher_exit;
 555        }
 556
 557        dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 558                cc_dma_buf_type(req_ctx->dma_buf_type));
 559
 560        return 0;
 561
 562cipher_exit:
 563        cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 564        return rc;
 565}
 566
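     /*
      * cc_unmap_aead_request() - Release the DMA resources taken by
      * cc_map_aead_request(): the MAC buffer, the GCM/CCM helper blocks, the
      * IV, the MLLI table and the source/destination SG mappings. On coherent
      * platforms the backed-up MAC is copied back for in-place decrypt.
      */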
 567void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 568{
 569        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 570        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 571        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 572        struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 573        u32 dummy;
 574        bool chained;
 575        u32 size_to_unmap = 0;
 576
 577        if (areq_ctx->mac_buf_dma_addr) {
 578                dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 579                                 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 580        }
 581
 582        if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 583                if (areq_ctx->hkey_dma_addr) {
 584                        dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 585                                         AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 586                }
 587
 588                if (areq_ctx->gcm_block_len_dma_addr) {
 589                        dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 590                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 591                }
 592
 593                if (areq_ctx->gcm_iv_inc1_dma_addr) {
 594                        dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 595                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 596                }
 597
 598                if (areq_ctx->gcm_iv_inc2_dma_addr) {
 599                        dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 600                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 601                }
 602        }
 603
 604        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 605                if (areq_ctx->ccm_iv0_dma_addr) {
 606                        dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 607                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 608                }
 609
 610                dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 611        }
 612        if (areq_ctx->gen_ctx.iv_dma_addr) {
 613                dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 614                                 hw_iv_size, DMA_BIDIRECTIONAL);
 615        }
 616
  617        /* In case a pool was set, a table was
  618         * allocated and should be released
  619         */
 620        if (areq_ctx->mlli_params.curr_pool) {
 621                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 622                        &areq_ctx->mlli_params.mlli_dma_addr,
 623                        areq_ctx->mlli_params.mlli_virt_addr);
 624                dma_pool_free(areq_ctx->mlli_params.curr_pool,
 625                              areq_ctx->mlli_params.mlli_virt_addr,
 626                              areq_ctx->mlli_params.mlli_dma_addr);
 627        }
 628
 629        dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 630                sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 631                req->assoclen, req->cryptlen);
 632        size_to_unmap = req->assoclen + req->cryptlen;
 633        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
 634                size_to_unmap += areq_ctx->req_authsize;
 635        if (areq_ctx->is_gcm4543)
 636                size_to_unmap += crypto_aead_ivsize(tfm);
 637
 638        dma_unmap_sg(dev, req->src,
 639                     cc_get_sgl_nents(dev, req->src, size_to_unmap,
 640                                      &dummy, &chained),
 641                     DMA_BIDIRECTIONAL);
 642        if (req->src != req->dst) {
 643                dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 644                        sg_virt(req->dst));
 645                dma_unmap_sg(dev, req->dst,
 646                             cc_get_sgl_nents(dev, req->dst, size_to_unmap,
 647                                              &dummy, &chained),
 648                             DMA_BIDIRECTIONAL);
 649        }
 650        if (drvdata->coherent &&
 651            areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 652            req->src == req->dst) {
  653                /* Copy the MAC back from the temporary location to deal
  654                 * with possible data memory overwriting caused by a cache
  655                 * coherency problem.
  656                 */
 657                cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 658        }
 659}
 660
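     /*
      * cc_get_aead_icv_nents() - Work out how many SG entries hold the ICV and
      * whether it is fragmented across entries. Returns 0, 1 or 2, or -1 if
      * the fragmentation is not supported.
      */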
 661static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
 662                                 unsigned int sgl_nents, unsigned int authsize,
 663                                 u32 last_entry_data_size,
 664                                 bool *is_icv_fragmented)
 665{
 666        unsigned int icv_max_size = 0;
 667        unsigned int icv_required_size = authsize > last_entry_data_size ?
 668                                        (authsize - last_entry_data_size) :
 669                                        authsize;
 670        unsigned int nents;
 671        unsigned int i;
 672
 673        if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
 674                *is_icv_fragmented = false;
 675                return 0;
 676        }
 677
 678        for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
 679                if (!sgl)
 680                        break;
 681                sgl = sg_next(sgl);
 682        }
 683
 684        if (sgl)
 685                icv_max_size = sgl->length;
 686
 687        if (last_entry_data_size > authsize) {
 688                /* ICV attached to data in last entry (not fragmented!) */
 689                nents = 0;
 690                *is_icv_fragmented = false;
 691        } else if (last_entry_data_size == authsize) {
 692                /* ICV placed in whole last entry (not fragmented!) */
 693                nents = 1;
 694                *is_icv_fragmented = false;
 695        } else if (icv_max_size > icv_required_size) {
 696                nents = 1;
 697                *is_icv_fragmented = true;
 698        } else if (icv_max_size == icv_required_size) {
 699                nents = 2;
 700                *is_icv_fragmented = true;
 701        } else {
 702                dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
 703                        MAX_ICV_NENTS_SUPPORTED);
 704                nents = -1; /*unsupported*/
 705        }
 706        dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
 707                (*is_icv_fragmented ? "true" : "false"), nents);
 708
 709        return nents;
 710}
 711
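     /*
      * cc_aead_chain_iv() - DMA-map the request IV. When chaining is requested
      * and only the plaintext is authenticated, the IV is also added to the
      * assoc MLLI buffer list so that it is covered by the MAC.
      */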
 712static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 713                            struct aead_request *req,
 714                            struct buffer_array *sg_data,
 715                            bool is_last, bool do_chain)
 716{
 717        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 718        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 719        struct device *dev = drvdata_to_dev(drvdata);
 720        int rc = 0;
 721
 722        if (!req->iv) {
 723                areq_ctx->gen_ctx.iv_dma_addr = 0;
 724                goto chain_iv_exit;
 725        }
 726
 727        areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
 728                                                       hw_iv_size,
 729                                                       DMA_BIDIRECTIONAL);
 730        if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 731                dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 732                        hw_iv_size, req->iv);
 733                rc = -ENOMEM;
 734                goto chain_iv_exit;
 735        }
 736
 737        dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 738                hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 739        // TODO: what about CTR?? ask Ron
 740        if (do_chain && areq_ctx->plaintext_authenticate_only) {
 741                struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 742                unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 743                unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 744                /* Chain to given list */
 745                cc_add_buffer_entry(dev, sg_data,
 746                                    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
 747                                    iv_size_to_authenc, is_last,
 748                                    &areq_ctx->assoc.mlli_nents);
 749                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 750        }
 751
 752chain_iv_exit:
 753        return rc;
 754}
 755
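     /*
      * cc_aead_chain_assoc() - Count the SG entries holding the associated
      * data (assumed to be already mapped) and, when MLLI mode is needed or
      * chaining is requested, add them to the MLLI buffer list.
      */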
 756static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 757                               struct aead_request *req,
 758                               struct buffer_array *sg_data,
 759                               bool is_last, bool do_chain)
 760{
 761        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 762        int rc = 0;
 763        u32 mapped_nents = 0;
 764        struct scatterlist *current_sg = req->src;
 765        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 766        unsigned int sg_index = 0;
 767        u32 size_of_assoc = req->assoclen;
 768        struct device *dev = drvdata_to_dev(drvdata);
 769
 770        if (areq_ctx->is_gcm4543)
 771                size_of_assoc += crypto_aead_ivsize(tfm);
 772
 773        if (!sg_data) {
 774                rc = -EINVAL;
 775                goto chain_assoc_exit;
 776        }
 777
 778        if (req->assoclen == 0) {
 779                areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 780                areq_ctx->assoc.nents = 0;
 781                areq_ctx->assoc.mlli_nents = 0;
 782                dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 783                        cc_dma_buf_type(areq_ctx->assoc_buff_type),
 784                        areq_ctx->assoc.nents);
 785                goto chain_assoc_exit;
 786        }
 787
  788        // Iterate over the sgl to see how many entries hold associated data;
  789        // it is assumed that if we reached here, the sgl is already mapped.
  790        sg_index = current_sg->length;
  791        // Check whether the first entry alone covers all the associated data
  792        if (sg_index > size_of_assoc) {
 793                mapped_nents++;
 794        } else {
 795                while (sg_index <= size_of_assoc) {
 796                        current_sg = sg_next(current_sg);
  797                        /* If we have reached the end of the sgl, then this
  798                         * is unexpected
  799                         */
 800                        if (!current_sg) {
 801                                dev_err(dev, "reached end of sg list. unexpected\n");
 802                                return -EINVAL;
 803                        }
 804                        sg_index += current_sg->length;
 805                        mapped_nents++;
 806                }
 807        }
 808        if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 809                dev_err(dev, "Too many fragments. current %d max %d\n",
 810                        mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 811                return -ENOMEM;
 812        }
 813        areq_ctx->assoc.nents = mapped_nents;
 814
  815        /* In the CCM case we have an additional entry for the
  816         * CCM header configuration
  817         */
  818        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
  819                if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
  820                        dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 821                                (areq_ctx->assoc.nents + 1),
 822                                LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 823                        rc = -ENOMEM;
 824                        goto chain_assoc_exit;
 825                }
 826        }
 827
 828        if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 829                areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 830        else
 831                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 832
 833        if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 834                dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 835                        cc_dma_buf_type(areq_ctx->assoc_buff_type),
 836                        areq_ctx->assoc.nents);
 837                cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 838                                req->assoclen, 0, is_last,
 839                                &areq_ctx->assoc.mlli_nents);
 840                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 841        }
 842
 843chain_assoc_exit:
 844        return rc;
 845}
 846
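     /*
      * cc_prepare_aead_data_dlli() - For the single-entry (DLLI) case, record
      * the DMA and virtual addresses of the ICV at the tail of the source or
      * destination buffer, depending on direction and in-place operation.
      */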
 847static void cc_prepare_aead_data_dlli(struct aead_request *req,
 848                                      u32 *src_last_bytes, u32 *dst_last_bytes)
 849{
 850        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 851        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 852        unsigned int authsize = areq_ctx->req_authsize;
 853
 854        areq_ctx->is_icv_fragmented = false;
 855        if (req->src == req->dst) {
 856                /*INPLACE*/
 857                areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 858                        (*src_last_bytes - authsize);
 859                areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 860                        (*src_last_bytes - authsize);
 861        } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 862                /*NON-INPLACE and DECRYPT*/
 863                areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
 864                        (*src_last_bytes - authsize);
 865                areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
 866                        (*src_last_bytes - authsize);
 867        } else {
 868                /*NON-INPLACE and ENCRYPT*/
 869                areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
 870                        (*dst_last_bytes - authsize);
 871                areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
 872                        (*dst_last_bytes - authsize);
 873        }
 874}
 875
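     /*
      * cc_prepare_aead_data_mlli() - Add the source (and, for non-in-place
      * requests, destination) data to the MLLI buffer list and locate the ICV:
      * either contiguous in the last SG entry, or fragmented, in which case it
      * is redirected to the mac_buf/backup_mac bounce buffers.
      */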
 876static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 877                                     struct aead_request *req,
 878                                     struct buffer_array *sg_data,
 879                                     u32 *src_last_bytes, u32 *dst_last_bytes,
 880                                     bool is_last_table)
 881{
 882        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 883        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 884        unsigned int authsize = areq_ctx->req_authsize;
 885        int rc = 0, icv_nents;
 886        struct device *dev = drvdata_to_dev(drvdata);
 887        struct scatterlist *sg;
 888
 889        if (req->src == req->dst) {
 890                /*INPLACE*/
 891                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 892                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 893                                areq_ctx->src_offset, is_last_table,
 894                                &areq_ctx->src.mlli_nents);
 895
 896                icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 897                                                  areq_ctx->src.nents,
 898                                                  authsize, *src_last_bytes,
 899                                                  &areq_ctx->is_icv_fragmented);
 900                if (icv_nents < 0) {
 901                        rc = -ENOTSUPP;
 902                        goto prepare_data_mlli_exit;
 903                }
 904
 905                if (areq_ctx->is_icv_fragmented) {
  906                        /* Backup happens only when the ICV is fragmented;
  907                         * ICV verification is then done by a CPU compare
  908                         * to simplify MAC verification upon request completion.
  909                         */
  910                        if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  911                                /* Coherent platforms (e.g. ACP) have
  912                                 * already copied the ICV for any
  913                                 * INPLACE-DECRYPT operation, hence
  914                                 * this copy must be skipped here.
  915                                 */
 916                                if (!drvdata->coherent)
 917                                        cc_copy_mac(dev, req, CC_SG_TO_BUF);
 918
 919                                areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 920                        } else {
 921                                areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 922                                areq_ctx->icv_dma_addr =
 923                                        areq_ctx->mac_buf_dma_addr;
 924                        }
 925                } else { /* Contig. ICV */
 926                        sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  927                        /* Should handle the case where the sg is not contiguous */
 928                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 929                                (*src_last_bytes - authsize);
 930                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 931                                (*src_last_bytes - authsize);
 932                }
 933
 934        } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 935                /*NON-INPLACE and DECRYPT*/
 936                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 937                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 938                                areq_ctx->src_offset, is_last_table,
 939                                &areq_ctx->src.mlli_nents);
 940                cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 941                                areq_ctx->dst_sgl, areq_ctx->cryptlen,
 942                                areq_ctx->dst_offset, is_last_table,
 943                                &areq_ctx->dst.mlli_nents);
 944
 945                icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
 946                                                  areq_ctx->src.nents,
 947                                                  authsize, *src_last_bytes,
 948                                                  &areq_ctx->is_icv_fragmented);
 949                if (icv_nents < 0) {
 950                        rc = -ENOTSUPP;
 951                        goto prepare_data_mlli_exit;
 952                }
 953
  954                /* Backup happens only when the ICV is fragmented; ICV
  955                 * verification is then done by a CPU compare to simplify
  956                 * MAC verification upon request completion.
  957                 */
 958                if (areq_ctx->is_icv_fragmented) {
 959                        cc_copy_mac(dev, req, CC_SG_TO_BUF);
 960                        areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 961
 962                } else { /* Contig. ICV */
 963                        sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  964                        /* Should handle the case where the sg is not contiguous */
 965                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 966                                (*src_last_bytes - authsize);
 967                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 968                                (*src_last_bytes - authsize);
 969                }
 970
 971        } else {
 972                /*NON-INPLACE and ENCRYPT*/
 973                cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 974                                areq_ctx->dst_sgl, areq_ctx->cryptlen,
 975                                areq_ctx->dst_offset, is_last_table,
 976                                &areq_ctx->dst.mlli_nents);
 977                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 978                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 979                                areq_ctx->src_offset, is_last_table,
 980                                &areq_ctx->src.mlli_nents);
 981
 982                icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
 983                                                  areq_ctx->dst.nents,
 984                                                  authsize, *dst_last_bytes,
 985                                                  &areq_ctx->is_icv_fragmented);
 986                if (icv_nents < 0) {
 987                        rc = -ENOTSUPP;
 988                        goto prepare_data_mlli_exit;
 989                }
 990
 991                if (!areq_ctx->is_icv_fragmented) {
 992                        sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 993                        /* Contig. ICV */
 994                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 995                                (*dst_last_bytes - authsize);
 996                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 997                                (*dst_last_bytes - authsize);
 998                } else {
 999                        areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1000                        areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1001                }
1002        }
1003
1004prepare_data_mlli_exit:
1005        return rc;
1006}
1007
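     /*
      * cc_aead_chain_data() - Find where the cipher data starts in the source
      * and destination SG lists (skipping the associated data and, for
      * GCM4543, the IV), map the destination when it differs from the source,
      * and select DLLI or MLLI addressing for the data and ICV.
      */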
1008static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1009                              struct aead_request *req,
1010                              struct buffer_array *sg_data,
1011                              bool is_last_table, bool do_chain)
1012{
1013        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1014        struct device *dev = drvdata_to_dev(drvdata);
1015        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1016        unsigned int authsize = areq_ctx->req_authsize;
1017        unsigned int src_last_bytes = 0, dst_last_bytes = 0;
1018        int rc = 0;
1019        u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1020        u32 offset = 0;
1021        /* non-inplace mode */
1022        unsigned int size_for_map = req->assoclen + req->cryptlen;
1023        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1024        u32 sg_index = 0;
1025        bool chained = false;
1026        bool is_gcm4543 = areq_ctx->is_gcm4543;
1027        u32 size_to_skip = req->assoclen;
1028
1029        if (is_gcm4543)
1030                size_to_skip += crypto_aead_ivsize(tfm);
1031
1032        offset = size_to_skip;
1033
1034        if (!sg_data)
1035                return -EINVAL;
1036
1037        areq_ctx->src_sgl = req->src;
1038        areq_ctx->dst_sgl = req->dst;
1039
1040        if (is_gcm4543)
1041                size_for_map += crypto_aead_ivsize(tfm);
1042
1043        size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1044                        authsize : 0;
1045        src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1046                                            &src_last_bytes, &chained);
1047        sg_index = areq_ctx->src_sgl->length;
1048        //check where the data starts
1049        while (sg_index <= size_to_skip) {
1050                offset -= areq_ctx->src_sgl->length;
1051                areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
 1052                // If we have reached the end of the sgl, then this is unexpected
1053                if (!areq_ctx->src_sgl) {
1054                        dev_err(dev, "reached end of sg list. unexpected\n");
1055                        return -EINVAL;
1056                }
1057                sg_index += areq_ctx->src_sgl->length;
1058                src_mapped_nents--;
1059        }
1060        if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1061                dev_err(dev, "Too many fragments. current %d max %d\n",
1062                        src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1063                return -ENOMEM;
1064        }
1065
1066        areq_ctx->src.nents = src_mapped_nents;
1067
1068        areq_ctx->src_offset = offset;
1069
1070        if (req->src != req->dst) {
1071                size_for_map = req->assoclen + req->cryptlen;
1072                size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1073                                authsize : 0;
1074                if (is_gcm4543)
1075                        size_for_map += crypto_aead_ivsize(tfm);
1076
1077                rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1078                               &areq_ctx->dst.nents,
1079                               LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1080                               &dst_mapped_nents);
1081                if (rc) {
1082                        rc = -ENOMEM;
1083                        goto chain_data_exit;
1084                }
1085        }
1086
1087        dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1088                                            &dst_last_bytes, &chained);
1089        sg_index = areq_ctx->dst_sgl->length;
1090        offset = size_to_skip;
1091
1092        //check where the data starts
1093        while (sg_index <= size_to_skip) {
1094                offset -= areq_ctx->dst_sgl->length;
1095                areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
 1096                // If we have reached the end of the sgl, then this is unexpected
1097                if (!areq_ctx->dst_sgl) {
1098                        dev_err(dev, "reached end of sg list. unexpected\n");
1099                        return -EINVAL;
1100                }
1101                sg_index += areq_ctx->dst_sgl->length;
1102                dst_mapped_nents--;
1103        }
1104        if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1105                dev_err(dev, "Too many fragments. current %d max %d\n",
1106                        dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1107                return -ENOMEM;
1108        }
1109        areq_ctx->dst.nents = dst_mapped_nents;
1110        areq_ctx->dst_offset = offset;
1111        if (src_mapped_nents > 1 ||
1112            dst_mapped_nents  > 1 ||
1113            do_chain) {
1114                areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1115                rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1116                                               &src_last_bytes,
1117                                               &dst_last_bytes, is_last_table);
1118        } else {
1119                areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1120                cc_prepare_aead_data_dlli(req, &src_last_bytes,
1121                                          &dst_last_bytes);
1122        }
1123
1124chain_data_exit:
1125        return rc;
1126}
1127
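     /*
      * cc_update_aead_mlli_nents() - Compute the SRAM addresses at which the
      * assoc, source and destination MLLI tables will be placed and, for
      * non-single-pass flows, fold the data MLLI entries into the assoc MLLI
      * entry count.
      */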
1128static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1129                                      struct aead_request *req)
1130{
1131        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1132        u32 curr_mlli_size = 0;
1133
1134        if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1135                areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1136                curr_mlli_size = areq_ctx->assoc.mlli_nents *
1137                                                LLI_ENTRY_BYTE_SIZE;
1138        }
1139
1140        if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
 1141                /* Inplace case: dst nents equal src nents */
1142                if (req->src == req->dst) {
1143                        areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1144                        areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1145                                                                curr_mlli_size;
1146                        areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1147                        if (!areq_ctx->is_single_pass)
1148                                areq_ctx->assoc.mlli_nents +=
1149                                        areq_ctx->src.mlli_nents;
1150                } else {
1151                        if (areq_ctx->gen_ctx.op_type ==
1152                                        DRV_CRYPTO_DIRECTION_DECRYPT) {
1153                                areq_ctx->src.sram_addr =
1154                                                drvdata->mlli_sram_addr +
1155                                                                curr_mlli_size;
1156                                areq_ctx->dst.sram_addr =
1157                                                areq_ctx->src.sram_addr +
1158                                                areq_ctx->src.mlli_nents *
1159                                                LLI_ENTRY_BYTE_SIZE;
1160                                if (!areq_ctx->is_single_pass)
1161                                        areq_ctx->assoc.mlli_nents +=
1162                                                areq_ctx->src.mlli_nents;
1163                        } else {
1164                                areq_ctx->dst.sram_addr =
1165                                                drvdata->mlli_sram_addr +
1166                                                                curr_mlli_size;
1167                                areq_ctx->src.sram_addr =
1168                                                areq_ctx->dst.sram_addr +
1169                                                areq_ctx->dst.mlli_nents *
1170                                                LLI_ENTRY_BYTE_SIZE;
1171                                if (!areq_ctx->is_single_pass)
1172                                        areq_ctx->assoc.mlli_nents +=
1173                                                areq_ctx->dst.mlli_nents;
1174                        }
1175                }
1176        }
1177}
1178
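     /*
      * cc_map_aead_request() - DMA-map all buffers of an AEAD request: the MAC
      * buffer, the CCM/GCM helper blocks and the request data, building MLLI
      * tables where the data is fragmented.
      */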
1179int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1180{
1181        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1182        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1183        struct device *dev = drvdata_to_dev(drvdata);
1184        struct buffer_array sg_data;
1185        unsigned int authsize = areq_ctx->req_authsize;
1186        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1187        int rc = 0;
1188        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1189        bool is_gcm4543 = areq_ctx->is_gcm4543;
1190        dma_addr_t dma_addr;
1191        u32 mapped_nents = 0;
 1192        u32 dummy = 0; /* used for the assoc data fragments */
1193        u32 size_to_map = 0;
1194        gfp_t flags = cc_gfp_flags(&req->base);
1195
1196        mlli_params->curr_pool = NULL;
1197        sg_data.num_of_buffers = 0;
1198
 1199        /* Copy the MAC to a temporary location to deal with possible
 1200         * data memory overwriting caused by a cache coherency problem.
 1201         */
1202        if (drvdata->coherent &&
1203            areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1204            req->src == req->dst)
1205                cc_copy_mac(dev, req, CC_SG_TO_BUF);
1206
 1207        /* Calculate the cipher data size; on decrypt the ICV is not included */
1208        areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1209                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1210                                req->cryptlen :
1211                                (req->cryptlen - authsize);
1212
1213        dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1214                                  DMA_BIDIRECTIONAL);
1215        if (dma_mapping_error(dev, dma_addr)) {
1216                dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1217                        MAX_MAC_SIZE, areq_ctx->mac_buf);
1218                rc = -ENOMEM;
1219                goto aead_map_failure;
1220        }
1221        areq_ctx->mac_buf_dma_addr = dma_addr;
1222
1223        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1224                void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1225
1226                dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1227                                          DMA_TO_DEVICE);
1228
1229                if (dma_mapping_error(dev, dma_addr)) {
1230                        dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
1231                                AES_BLOCK_SIZE, addr);
1232                        areq_ctx->ccm_iv0_dma_addr = 0;
1233                        rc = -ENOMEM;
1234                        goto aead_map_failure;
1235                }
1236                areq_ctx->ccm_iv0_dma_addr = dma_addr;
1237
1238                if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1239                                         &sg_data, req->assoclen)) {
1240                        rc = -ENOMEM;
1241                        goto aead_map_failure;
1242                }
1243        }
1244
1245        if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1246                dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1247                                          DMA_BIDIRECTIONAL);
1248                if (dma_mapping_error(dev, dma_addr)) {
1249                        dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1250                                AES_BLOCK_SIZE, areq_ctx->hkey);
1251                        rc = -ENOMEM;
1252                        goto aead_map_failure;
1253                }
1254                areq_ctx->hkey_dma_addr = dma_addr;
1255
1256                dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1257                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1258                if (dma_mapping_error(dev, dma_addr)) {
1259                        dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1260                                AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1261                        rc = -ENOMEM;
1262                        goto aead_map_failure;
1263                }
1264                areq_ctx->gcm_block_len_dma_addr = dma_addr;
1265
1266                dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1267                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1268
1269                if (dma_mapping_error(dev, dma_addr)) {
1270                        dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1271                                AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1272                        areq_ctx->gcm_iv_inc1_dma_addr = 0;
1273                        rc = -ENOMEM;
1274                        goto aead_map_failure;
1275                }
1276                areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1277
1278                dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1279                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1280
1281                if (dma_mapping_error(dev, dma_addr)) {
1282                        dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1283                                AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1284                        areq_ctx->gcm_iv_inc2_dma_addr = 0;
1285                        rc = -ENOMEM;
1286                        goto aead_map_failure;
1287                }
1288                areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1289        }
1290
1291        size_to_map = req->cryptlen + req->assoclen;
1292        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1293                size_to_map += authsize;
1294
1295        if (is_gcm4543)
1296                size_to_map += crypto_aead_ivsize(tfm);
1297        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1298                       &areq_ctx->src.nents,
1299                       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1300                        LLI_MAX_NUM_OF_DATA_ENTRIES),
1301                       &dummy, &mapped_nents);
1302        if (rc) {
1303                rc = -ENOMEM;
1304                goto aead_map_failure;
1305        }
1306
1307        if (areq_ctx->is_single_pass) {
1308                /*
1309                 * Create MLLI table for:
1310                 *   (1) Assoc. data
1311                 *   (2) Src/Dst SGLs
1312                 *   Note: IV is a contiguous buffer (not an SGL)
1313                 */
1314                rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1315                if (rc)
1316                        goto aead_map_failure;
1317                rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1318                if (rc)
1319                        goto aead_map_failure;
1320                rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1321                if (rc)
1322                        goto aead_map_failure;
1323        } else { /* DOUBLE-PASS flow */
1324                /*
1325                 * Prepare MLLI table(s) in this order:
1326                 *
1327                 * If ENCRYPT/DECRYPT (inplace):
1328                 *   (1) MLLI table for assoc
1329                 *   (2) IV entry (chained right after end of assoc)
1330                 *   (3) MLLI for src/dst (inplace operation)
1331                 *
1332                 * If ENCRYPT (non-inplace)
1333                 *   (1) MLLI table for assoc
1334                 *   (2) IV entry (chained right after end of assoc)
1335                 *   (3) MLLI for dst
1336                 *   (4) MLLI for src
1337                 *
1338                 * If DECRYPT (non-inplace)
1339                 *   (1) MLLI table for assoc
1340                 *   (2) IV entry (chained right after end of assoc)
1341                 *   (3) MLLI for src
1342                 *   (4) MLLI for dst
1343                 */
1344                rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1345                if (rc)
1346                        goto aead_map_failure;
1347                rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1348                if (rc)
1349                        goto aead_map_failure;
1350                rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1351                if (rc)
1352                        goto aead_map_failure;
1353        }
1354
1355        /* MLLI support - start building the MLLI according to the
1356         * above results
1357         */
1358        if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1359            areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1360                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1361                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1362                if (rc)
1363                        goto aead_map_failure;
1364
1365                cc_update_aead_mlli_nents(drvdata, req);
1366                dev_dbg(dev, "assoc params mn %d\n",
1367                        areq_ctx->assoc.mlli_nents);
1368                dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1369                dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1370        }
1371        return 0;
1372
1373aead_map_failure:
1374        cc_unmap_aead_request(dev, req);
1375        return rc;
1376}
1377
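/**
 * cc_map_hash_request_final() - Map the data for a final/finup hash operation
 *
 * @drvdata: driver private data
 * @ctx: ahash request context
 * @src: data scatterlist
 * @nbytes: number of data bytes in @src
 * @do_update: whether the data in @src should be mapped as well
 * @flags: GFP flags for the MLLI table allocation
 *
 * Maps any data left over in the context buffer from previous updates and,
 * if requested, the new data in @src. A single mapped entry with no buffered
 * data is handled as DLLI; anything else gets an MLLI table.
 *
 * Return: 0 on success (including the nothing-to-do case), -ENOMEM on
 * failure.
 */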
1378int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1379                              struct scatterlist *src, unsigned int nbytes,
1380                              bool do_update, gfp_t flags)
1381{
1382        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1383        struct device *dev = drvdata_to_dev(drvdata);
1384        u8 *curr_buff = cc_hash_buf(areq_ctx);
1385        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1386        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1387        struct buffer_array sg_data;
1388        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1389        u32 dummy = 0;
1390        u32 mapped_nents = 0;
1391
1392        dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1393                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1394        /* Init the type of the dma buffer */
1395        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1396        mlli_params->curr_pool = NULL;
1397        sg_data.num_of_buffers = 0;
1398        areq_ctx->in_nents = 0;
1399
1400        if (nbytes == 0 && *curr_buff_cnt == 0) {
1401                /* nothing to do */
1402                return 0;
1403        }
1404
1405        /* TODO: copy the data if the buffer is large enough for the operation */
1406        /* map the previous buffer */
1407        if (*curr_buff_cnt) {
1408                if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1409                                    &sg_data)) {
1410                        return -ENOMEM;
1411                }
1412        }
1413
1414        if (src && nbytes > 0 && do_update) {
1415                if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1416                              &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1417                              &dummy, &mapped_nents)) {
1418                        goto unmap_curr_buff;
1419                }
1420                if (src && mapped_nents == 1 &&
1421                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1422                        memcpy(areq_ctx->buff_sg, src,
1423                               sizeof(struct scatterlist));
1424                        areq_ctx->buff_sg->length = nbytes;
1425                        areq_ctx->curr_sg = areq_ctx->buff_sg;
1426                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1427                } else {
1428                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1429                }
1430        }
1431
1432        /* build MLLI */
1433        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1434                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1435                /* add the src data to the sg_data */
1436                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1437                                0, true, &areq_ctx->mlli_nents);
1438                if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1439                        goto fail_unmap_din;
1440        }
1441        /* change the buffer index for the unmap function */
1442        areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1443        dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1444                cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1445        return 0;
1446
1447fail_unmap_din:
1448        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1449
1450unmap_curr_buff:
1451        if (*curr_buff_cnt)
1452                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1453
1454        return -ENOMEM;
1455}
1456
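/**
 * cc_map_hash_request_update() - Map the data for a hash update operation
 *
 * @drvdata: driver private data
 * @ctx: ahash request context
 * @src: data scatterlist
 * @nbytes: number of data bytes in @src
 * @block_size: hash block size
 * @flags: GFP flags for the MLLI table allocation
 *
 * If the buffered data plus @nbytes is less than one block, the data is only
 * copied into the context buffer. Otherwise the largest block-size multiple
 * is mapped for processing and the residue is copied to the next buffer.
 *
 * Return: 1 if the data was only buffered, 0 on success, -ENOMEM on failure.
 */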
1457int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1458                               struct scatterlist *src, unsigned int nbytes,
1459                               unsigned int block_size, gfp_t flags)
1460{
1461        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1462        struct device *dev = drvdata_to_dev(drvdata);
1463        u8 *curr_buff = cc_hash_buf(areq_ctx);
1464        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1465        u8 *next_buff = cc_next_buf(areq_ctx);
1466        u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1467        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1468        unsigned int update_data_len;
1469        u32 total_in_len = nbytes + *curr_buff_cnt;
1470        struct buffer_array sg_data;
1471        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1472        unsigned int swap_index = 0;
1473        u32 dummy = 0;
1474        u32 mapped_nents = 0;
1475
1476        dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1477                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1478        /* Init the type of the dma buffer */
1479        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1480        mlli_params->curr_pool = NULL;
1481        areq_ctx->curr_sg = NULL;
1482        sg_data.num_of_buffers = 0;
1483        areq_ctx->in_nents = 0;
1484
1485        if (total_in_len < block_size) {
1486                dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1487                        curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1488                areq_ctx->in_nents =
1489                        cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1490                sg_copy_to_buffer(src, areq_ctx->in_nents,
1491                                  &curr_buff[*curr_buff_cnt], nbytes);
1492                *curr_buff_cnt += nbytes;
1493                return 1;
1494        }
1495
1496        /* Calculate the residue size */
1497        *next_buff_cnt = total_in_len & (block_size - 1);
1498        /* update data len */
1499        update_data_len = total_in_len - *next_buff_cnt;
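        /* The mask above relies on block_size being a power of two (e.g.
         * 64- or 128-byte hash blocks). Worked example: block_size = 0x40
         * and total_in_len = 0x50 give next_buff_cnt = 0x50 & 0x3f = 0x10
         * bytes of residue kept for the next call and
         * update_data_len = 0x40 bytes hashed now.
         */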
1500
1501        dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1502                *next_buff_cnt, update_data_len);
1503
1504        /* Copy the new residue to next buffer */
1505        if (*next_buff_cnt) {
1506                dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1507                        next_buff, (update_data_len - *curr_buff_cnt),
1508                        *next_buff_cnt);
1509                cc_copy_sg_portion(dev, next_buff, src,
1510                                   (update_data_len - *curr_buff_cnt),
1511                                   nbytes, CC_SG_TO_BUF);
1512                /* change the buffer index for next operation */
1513                swap_index = 1;
1514        }
1515
1516        if (*curr_buff_cnt) {
1517                if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1518                                    &sg_data)) {
1519                        return -ENOMEM;
1520                }
1521                /* change the buffer index for next operation */
1522                swap_index = 1;
1523        }
1524
1525        if (update_data_len > *curr_buff_cnt) {
1526                if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1527                              DMA_TO_DEVICE, &areq_ctx->in_nents,
1528                              LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1529                              &mapped_nents)) {
1530                        goto unmap_curr_buff;
1531                }
1532                if (mapped_nents == 1 &&
1533                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1534                        /* only one entry in the SG and no previous data */
1535                        memcpy(areq_ctx->buff_sg, src,
1536                               sizeof(struct scatterlist));
1537                        areq_ctx->buff_sg->length = update_data_len;
1538                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1539                        areq_ctx->curr_sg = areq_ctx->buff_sg;
1540                } else {
1541                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1542                }
1543        }
1544
1545        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1546                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1547                /* add the src data to the sg_data */
1548                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1549                                (update_data_len - *curr_buff_cnt), 0, true,
1550                                &areq_ctx->mlli_nents);
1551                if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1552                        goto fail_unmap_din;
1553        }
1554        areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1555
1556        return 0;
1557
1558fail_unmap_din:
1559        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1560
1561unmap_curr_buff:
1562        if (*curr_buff_cnt)
1563                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1564
1565        return -ENOMEM;
1566}
1567
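/**
 * cc_unmap_hash_request() - Release the DMA resources of a hash request
 *
 * @dev: device object
 * @ctx: ahash request context
 * @src: data scatterlist
 * @do_revert: when true, toggle the buffer index back instead of clearing
 *             the buffered data length
 *
 * Frees the MLLI table if one was allocated and unmaps the source
 * scatterlist and the staging buffer mapped by the map functions above.
 */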
1568void cc_unmap_hash_request(struct device *dev, void *ctx,
1569                           struct scatterlist *src, bool do_revert)
1570{
1571        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1572        u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1573
1574        /* In case a pool was set, a table was
1575         * allocated and should be released.
1576         */
1577        if (areq_ctx->mlli_params.curr_pool) {
1578                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1579                        &areq_ctx->mlli_params.mlli_dma_addr,
1580                        areq_ctx->mlli_params.mlli_virt_addr);
1581                dma_pool_free(areq_ctx->mlli_params.curr_pool,
1582                              areq_ctx->mlli_params.mlli_virt_addr,
1583                              areq_ctx->mlli_params.mlli_dma_addr);
1584        }
1585
1586        if (src && areq_ctx->in_nents) {
1587                dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1588                        sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1589                dma_unmap_sg(dev, src,
1590                             areq_ctx->in_nents, DMA_TO_DEVICE);
1591        }
1592
1593        if (*prev_len) {
1594                dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1595                        sg_virt(areq_ctx->buff_sg),
1596                        &sg_dma_address(areq_ctx->buff_sg),
1597                        sg_dma_len(areq_ctx->buff_sg));
1598                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1599                if (!do_revert) {
1600                        /* clean the previous data length for update
1601                         * operation
1602                         */
1603                        *prev_len = 0;
1604                } else {
1605                        areq_ctx->buff_index ^= 1;
1606                }
1607        }
1608}
1609
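/**
 * cc_buffer_mgr_init() - Allocate the buffer manager handle
 *
 * @drvdata: driver private data
 *
 * Creates the DMA pool used for MLLI tables, sized for
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries of LLI_ENTRY_BYTE_SIZE bytes each.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */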
1610int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1611{
1612        struct buff_mgr_handle *buff_mgr_handle;
1613        struct device *dev = drvdata_to_dev(drvdata);
1614
1615        buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1616        if (!buff_mgr_handle)
1617                return -ENOMEM;
1618
1619        drvdata->buff_mgr_handle = buff_mgr_handle;
1620
1621        buff_mgr_handle->mlli_buffs_pool =
1622                dma_pool_create("dx_single_mlli_tables", dev,
1623                                MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1624                                LLI_ENTRY_BYTE_SIZE,
1625                                MLLI_TABLE_MIN_ALIGNMENT, 0);
1626
1627        if (!buff_mgr_handle->mlli_buffs_pool)
1628                goto error;
1629
1630        return 0;
1631
1632error:
1633        cc_buffer_mgr_fini(drvdata);
1634        return -ENOMEM;
1635}
1636
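/**
 * cc_buffer_mgr_fini() - Release the buffer manager resources
 *
 * @drvdata: driver private data
 *
 * Destroys the MLLI DMA pool and frees the buffer manager handle, if one
 * was allocated. Safe to call when the handle was never set up.
 *
 * Return: always 0.
 */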
1637int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1638{
1639        struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1640
1641        if (buff_mgr_handle) {
1642                dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1643                kfree(drvdata->buff_mgr_handle);
1644                drvdata->buff_mgr_handle = NULL;
1645        }
1646        return 0;
1647}
1648