linux/drivers/crypto/ccree/cc_buffer_mgr.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <crypto/internal/aead.h>
   5#include <crypto/authenc.h>
   6#include <crypto/scatterwalk.h>
   7#include <linux/dmapool.h>
   8#include <linux/dma-mapping.h>
   9
  10#include "cc_buffer_mgr.h"
  11#include "cc_lli_defs.h"
  12#include "cc_cipher.h"
  13#include "cc_hash.h"
  14#include "cc_aead.h"
  15
  16enum dma_buffer_type {
  17        DMA_NULL_TYPE = -1,
  18        DMA_SGL_TYPE = 1,
  19        DMA_BUFF_TYPE = 2,
  20};
  21
  22struct buff_mgr_handle {
  23        struct dma_pool *mlli_buffs_pool;
  24};
  25
  26union buffer_array_entry {
  27        struct scatterlist *sgl;
  28        dma_addr_t buffer_dma;
  29};
  30
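     /*
      * Scratch list of DMA buffers (flat buffers or SG lists) gathered for a
      * request; cc_generate_mlli() later renders all of them into a single
      * MLLI table.
      */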
  31struct buffer_array {
  32        unsigned int num_of_buffers;
  33        union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
  34        unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
  35        int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  36        int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
  37        enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
  38        bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
  39        u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
  40};
  41
  42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
  43{
  44        switch (type) {
  45        case CC_DMA_BUF_NULL:
  46                return "BUF_NULL";
  47        case CC_DMA_BUF_DLLI:
  48                return "BUF_DLLI";
  49        case CC_DMA_BUF_MLLI:
  50                return "BUF_MLLI";
  51        default:
  52                return "BUF_INVALID";
  53        }
  54}
  55
  56/**
  57 * cc_copy_mac() - Copy MAC to temporary location
  58 *
  59 * @dev: device object
  60 * @req: aead request object
  61 * @dir: [IN] copy from/to sgl
  62 */
  63static void cc_copy_mac(struct device *dev, struct aead_request *req,
  64                        enum cc_sg_cpy_direct dir)
  65{
  66        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
  67        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  68        u32 skip = areq_ctx->assoclen + req->cryptlen;
  69
  70        if (areq_ctx->is_gcm4543)
  71                skip += crypto_aead_ivsize(tfm);
  72
  73        cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
  74                           (skip - areq_ctx->req_authsize), skip, dir);
  75}
  76
  77/**
  78 * cc_get_sgl_nents() - Get scatterlist number of entries.
   79 * @dev: Device object
  80 * @sg_list: SG list
  81 * @nbytes: [IN] Total SGL data bytes.
  82 * @lbytes: [OUT] Returns the amount of bytes at the last entry
  83 */
  84static unsigned int cc_get_sgl_nents(struct device *dev,
  85                                     struct scatterlist *sg_list,
  86                                     unsigned int nbytes, u32 *lbytes)
  87{
  88        unsigned int nents = 0;
  89
  90        while (nbytes && sg_list) {
  91                nents++;
  92                /* get the number of bytes in the last entry */
  93                *lbytes = nbytes;
  94                nbytes -= (sg_list->length > nbytes) ?
  95                                nbytes : sg_list->length;
  96                sg_list = sg_next(sg_list);
  97        }
  98        dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
  99        return nents;
 100}
 101
  102/**
  103 * cc_zero_sgl() - Zero scatter list data.
  104 * @sgl: SG list to zero
  105 * @data_len: Number of bytes to zero
  106 */
 107void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 108{
 109        struct scatterlist *current_sg = sgl;
 110        int sg_index = 0;
 111
 112        while (sg_index <= data_len) {
 113                if (!current_sg) {
  114                        /* reached the end of the sgl --> just return */
 115                        return;
 116                }
 117                memset(sg_virt(current_sg), 0, current_sg->length);
 118                sg_index += current_sg->length;
 119                current_sg = sg_next(current_sg);
 120        }
 121}
 122
  123/**
  124 * cc_copy_sg_portion() - Copy data between a scatterlist and a flat
  125 * buffer, from offset to_skip up to end, in the given direction.
  126 * @dev: Device object
  127 * @dest: Flat buffer to copy to/from
  128 * @sg: SG list
  129 * @to_skip: Offset of the first byte to copy
  130 * @end: Offset of the last byte to copy
  131 * @direct: CC_SG_TO_BUF copies sg to dest, otherwise dest to sg
  132 */
 133void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 134                        u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 135{
 136        u32 nents;
 137
 138        nents = sg_nents_for_len(sg, end);
 139        sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 140                       (direct == CC_SG_TO_BUF));
 141}
 142
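     /*
      * Render one contiguous DMA buffer into LLI entries, splitting it at
      * CC_MAX_MLLI_ENTRY_SIZE boundaries and advancing the caller's entry
      * pointer and running entry count.
      */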
 143static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
 144                                  u32 buff_size, u32 *curr_nents,
 145                                  u32 **mlli_entry_pp)
 146{
 147        u32 *mlli_entry_p = *mlli_entry_pp;
 148        u32 new_nents;
 149
  150        /* Verify there is no memory overflow */
 151        new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
 152        if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
 153                dev_err(dev, "Too many mlli entries. current %d max %d\n",
 154                        new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
 155                return -ENOMEM;
 156        }
 157
  158        /* handle buffer longer than 64 kbytes */
 159        while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
 160                cc_lli_set_addr(mlli_entry_p, buff_dma);
 161                cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
 162                dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 163                        *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 164                        mlli_entry_p[LLI_WORD1_OFFSET]);
 165                buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
 166                buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
 167                mlli_entry_p = mlli_entry_p + 2;
 168                (*curr_nents)++;
 169        }
  170        /* Last entry */
 171        cc_lli_set_addr(mlli_entry_p, buff_dma);
 172        cc_lli_set_size(mlli_entry_p, buff_size);
 173        dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
 174                *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
 175                mlli_entry_p[LLI_WORD1_OFFSET]);
 176        mlli_entry_p = mlli_entry_p + 2;
 177        *mlli_entry_pp = mlli_entry_p;
 178        (*curr_nents)++;
 179        return 0;
 180}
 181
 182static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
 183                                u32 sgl_data_len, u32 sgl_offset,
 184                                u32 *curr_nents, u32 **mlli_entry_pp)
 185{
 186        struct scatterlist *curr_sgl = sgl;
 187        u32 *mlli_entry_p = *mlli_entry_pp;
 188        s32 rc = 0;
 189
 190        for ( ; (curr_sgl && sgl_data_len);
 191              curr_sgl = sg_next(curr_sgl)) {
 192                u32 entry_data_len =
 193                        (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
 194                                sg_dma_len(curr_sgl) - sgl_offset :
 195                                sgl_data_len;
 196                sgl_data_len -= entry_data_len;
 197                rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
 198                                            sgl_offset, entry_data_len,
 199                                            curr_nents, &mlli_entry_p);
 200                if (rc)
 201                        return rc;
 202
 203                sgl_offset = 0;
 204        }
 205        *mlli_entry_pp = mlli_entry_p;
 206        return 0;
 207}
 208
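     /*
      * Allocate an MLLI table from the DMA pool and render every buffer
      * collected in sg_data into it, accumulating per-buffer entry counts
      * where the caller asked for them.
      */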
 209static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 210                            struct mlli_params *mlli_params, gfp_t flags)
 211{
 212        u32 *mlli_p;
 213        u32 total_nents = 0, prev_total_nents = 0;
 214        int rc = 0, i;
 215
 216        dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
 217
  218        /* Allocate memory from the provided pool */
 219        mlli_params->mlli_virt_addr =
 220                dma_pool_alloc(mlli_params->curr_pool, flags,
 221                               &mlli_params->mlli_dma_addr);
 222        if (!mlli_params->mlli_virt_addr) {
 223                dev_err(dev, "dma_pool_alloc() failed\n");
 224                rc = -ENOMEM;
 225                goto build_mlli_exit;
 226        }
 227        /* Point to start of MLLI */
 228        mlli_p = (u32 *)mlli_params->mlli_virt_addr;
  229        /* go over all SGs and link them into one MLLI table */
 230        for (i = 0; i < sg_data->num_of_buffers; i++) {
 231                union buffer_array_entry *entry = &sg_data->entry[i];
 232                u32 tot_len = sg_data->total_data_len[i];
 233                u32 offset = sg_data->offset[i];
 234
 235                if (sg_data->type[i] == DMA_SGL_TYPE)
 236                        rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
 237                                                  offset, &total_nents,
 238                                                  &mlli_p);
 239                else /*DMA_BUFF_TYPE*/
 240                        rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
 241                                                    tot_len, &total_nents,
 242                                                    &mlli_p);
 243                if (rc)
 244                        return rc;
 245
  246                /* update the entry count of the current table */
  247                if (sg_data->mlli_nents[i]) {
  248                        /* Calculate the current MLLI table length for the
  249                         * length field in the descriptor
  250                         */
 251                        *sg_data->mlli_nents[i] +=
 252                                (total_nents - prev_total_nents);
 253                        prev_total_nents = total_nents;
 254                }
 255        }
 256
 257        /* Set MLLI size for the bypass operation */
 258        mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
 259
 260        dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
 261                mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
 262                mlli_params->mlli_len);
 263
 264build_mlli_exit:
 265        return rc;
 266}
 267
 268static void cc_add_buffer_entry(struct device *dev,
 269                                struct buffer_array *sgl_data,
 270                                dma_addr_t buffer_dma, unsigned int buffer_len,
 271                                bool is_last_entry, u32 *mlli_nents)
 272{
 273        unsigned int index = sgl_data->num_of_buffers;
 274
 275        dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
 276                index, &buffer_dma, buffer_len, is_last_entry);
 277        sgl_data->nents[index] = 1;
 278        sgl_data->entry[index].buffer_dma = buffer_dma;
 279        sgl_data->offset[index] = 0;
 280        sgl_data->total_data_len[index] = buffer_len;
 281        sgl_data->type[index] = DMA_BUFF_TYPE;
 282        sgl_data->is_last[index] = is_last_entry;
 283        sgl_data->mlli_nents[index] = mlli_nents;
 284        if (sgl_data->mlli_nents[index])
 285                *sgl_data->mlli_nents[index] = 0;
 286        sgl_data->num_of_buffers++;
 287}
 288
 289static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 290                            unsigned int nents, struct scatterlist *sgl,
 291                            unsigned int data_len, unsigned int data_offset,
 292                            bool is_last_table, u32 *mlli_nents)
 293{
 294        unsigned int index = sgl_data->num_of_buffers;
 295
 296        dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
 297                index, nents, sgl, data_len, is_last_table);
 298        sgl_data->nents[index] = nents;
 299        sgl_data->entry[index].sgl = sgl;
 300        sgl_data->offset[index] = data_offset;
 301        sgl_data->total_data_len[index] = data_len;
 302        sgl_data->type[index] = DMA_SGL_TYPE;
 303        sgl_data->is_last[index] = is_last_table;
 304        sgl_data->mlli_nents[index] = mlli_nents;
 305        if (sgl_data->mlli_nents[index])
 306                *sgl_data->mlli_nents[index] = 0;
 307        sgl_data->num_of_buffers++;
 308}
 309
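     /*
      * DMA-map an SG list: a single-entry list is mapped directly (DLLI
      * candidate); otherwise the entries covering nbytes are counted,
      * checked against max_sg_nents and then mapped.
      */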
 310static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 311                     unsigned int nbytes, int direction, u32 *nents,
 312                     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 313{
 314        if (sg_is_last(sg)) {
  315                /* One entry only case - set to DLLI */
 316                if (dma_map_sg(dev, sg, 1, direction) != 1) {
 317                        dev_err(dev, "dma_map_sg() single buffer failed\n");
 318                        return -ENOMEM;
 319                }
 320                dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 321                        &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
 322                        sg->offset, sg->length);
 323                *lbytes = nbytes;
 324                *nents = 1;
 325                *mapped_nents = 1;
  326        } else { /* !sg_is_last */
 327                *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 328                if (*nents > max_sg_nents) {
  329                        dev_err(dev, "Too many fragments. current %d max %d\n",
  330                                *nents, max_sg_nents);
  331                        *nents = 0;
 332                        return -ENOMEM;
 333                }
  334                /* In case of an IOMMU, the number of mapped nents may
  335                 * differ from the original SGL nents
  336                 */
 337                *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
 338                if (*mapped_nents == 0) {
 339                        *nents = 0;
 340                        dev_err(dev, "dma_map_sg() sg buffer failed\n");
 341                        return -ENOMEM;
 342                }
 343        }
 344
 345        return 0;
 346}
 347
 348static int
 349cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
 350                     u8 *config_data, struct buffer_array *sg_data,
 351                     unsigned int assoclen)
 352{
 353        dev_dbg(dev, " handle additional data config set to DLLI\n");
 354        /* create sg for the current buffer */
 355        sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
 356                    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
 357        if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
 358                dev_err(dev, "dma_map_sg() config buffer failed\n");
 359                return -ENOMEM;
 360        }
 361        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 362                &sg_dma_address(&areq_ctx->ccm_adata_sg),
 363                sg_page(&areq_ctx->ccm_adata_sg),
 364                sg_virt(&areq_ctx->ccm_adata_sg),
 365                areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
 366        /* prepare for case of MLLI */
 367        if (assoclen > 0) {
 368                cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
 369                                (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
 370                                0, false, NULL);
 371        }
 372        return 0;
 373}
 374
 375static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
 376                           u8 *curr_buff, u32 curr_buff_cnt,
 377                           struct buffer_array *sg_data)
 378{
 379        dev_dbg(dev, " handle curr buff %x set to   DLLI\n", curr_buff_cnt);
 380        /* create sg for the current buffer */
 381        sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
 382        if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
 383                dev_err(dev, "dma_map_sg() src buffer failed\n");
 384                return -ENOMEM;
 385        }
 386        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
 387                &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
 388                sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
 389                areq_ctx->buff_sg->length);
 390        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
 391        areq_ctx->curr_sg = areq_ctx->buff_sg;
 392        areq_ctx->in_nents = 0;
 393        /* prepare for case of MLLI */
 394        cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
 395                        false, NULL);
 396        return 0;
 397}
 398
 399void cc_unmap_cipher_request(struct device *dev, void *ctx,
 400                                unsigned int ivsize, struct scatterlist *src,
 401                                struct scatterlist *dst)
 402{
 403        struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 404
 405        if (req_ctx->gen_ctx.iv_dma_addr) {
 406                dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
 407                        &req_ctx->gen_ctx.iv_dma_addr, ivsize);
 408                dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
 409                                 ivsize, DMA_BIDIRECTIONAL);
 410        }
 411        /* Release pool */
 412        if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
 413            req_ctx->mlli_params.mlli_virt_addr) {
 414                dma_pool_free(req_ctx->mlli_params.curr_pool,
 415                              req_ctx->mlli_params.mlli_virt_addr,
 416                              req_ctx->mlli_params.mlli_dma_addr);
 417        }
 418
 419        dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
 420        dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 421
 422        if (src != dst) {
 423                dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
 424                dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
 425        }
 426}
 427
 428int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 429                          unsigned int ivsize, unsigned int nbytes,
 430                          void *info, struct scatterlist *src,
 431                          struct scatterlist *dst, gfp_t flags)
 432{
 433        struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 434        struct mlli_params *mlli_params = &req_ctx->mlli_params;
 435        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
 436        struct device *dev = drvdata_to_dev(drvdata);
 437        struct buffer_array sg_data;
 438        u32 dummy = 0;
 439        int rc = 0;
 440        u32 mapped_nents = 0;
 441
 442        req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 443        mlli_params->curr_pool = NULL;
 444        sg_data.num_of_buffers = 0;
 445
 446        /* Map IV buffer */
 447        if (ivsize) {
 448                dump_byte_array("iv", (u8 *)info, ivsize);
 449                req_ctx->gen_ctx.iv_dma_addr =
 450                        dma_map_single(dev, (void *)info,
 451                                       ivsize, DMA_BIDIRECTIONAL);
 452                if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
 453                        dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 454                                ivsize, info);
 455                        return -ENOMEM;
 456                }
 457                dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 458                        ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
 459        } else {
 460                req_ctx->gen_ctx.iv_dma_addr = 0;
 461        }
 462
 463        /* Map the src SGL */
 464        rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
 465                       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 466        if (rc)
 467                goto cipher_exit;
 468        if (mapped_nents > 1)
 469                req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 470
 471        if (src == dst) {
 472                /* Handle inplace operation */
 473                if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 474                        req_ctx->out_nents = 0;
 475                        cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 476                                        nbytes, 0, true,
 477                                        &req_ctx->in_mlli_nents);
 478                }
 479        } else {
 480                /* Map the dst sg */
 481                rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
 482                               &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 483                               &dummy, &mapped_nents);
 484                if (rc)
 485                        goto cipher_exit;
 486                if (mapped_nents > 1)
 487                        req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
 488
 489                if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 490                        cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
 491                                        nbytes, 0, true,
 492                                        &req_ctx->in_mlli_nents);
 493                        cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
 494                                        nbytes, 0, true,
 495                                        &req_ctx->out_mlli_nents);
 496                }
 497        }
 498
 499        if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
 500                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
 501                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 502                if (rc)
 503                        goto cipher_exit;
 504        }
 505
 506        dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
 507                cc_dma_buf_type(req_ctx->dma_buf_type));
 508
 509        return 0;
 510
 511cipher_exit:
 512        cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
 513        return rc;
 514}
 515
 516void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 517{
 518        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 519        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 520        struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 521
 522        if (areq_ctx->mac_buf_dma_addr) {
 523                dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
 524                                 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
 525        }
 526
 527        if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
 528                if (areq_ctx->hkey_dma_addr) {
 529                        dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
 530                                         AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
 531                }
 532
 533                if (areq_ctx->gcm_block_len_dma_addr) {
 534                        dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
 535                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 536                }
 537
 538                if (areq_ctx->gcm_iv_inc1_dma_addr) {
 539                        dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
 540                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 541                }
 542
 543                if (areq_ctx->gcm_iv_inc2_dma_addr) {
 544                        dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
 545                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 546                }
 547        }
 548
 549        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 550                if (areq_ctx->ccm_iv0_dma_addr) {
 551                        dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
 552                                         AES_BLOCK_SIZE, DMA_TO_DEVICE);
 553                }
 554
 555                dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
 556        }
 557        if (areq_ctx->gen_ctx.iv_dma_addr) {
 558                dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 559                                 hw_iv_size, DMA_BIDIRECTIONAL);
 560                kzfree(areq_ctx->gen_ctx.iv);
 561        }
 562
 563        /* Release pool */
 564        if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
 565             areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
 566            (areq_ctx->mlli_params.mlli_virt_addr)) {
 567                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
 568                        &areq_ctx->mlli_params.mlli_dma_addr,
 569                        areq_ctx->mlli_params.mlli_virt_addr);
 570                dma_pool_free(areq_ctx->mlli_params.curr_pool,
 571                              areq_ctx->mlli_params.mlli_virt_addr,
 572                              areq_ctx->mlli_params.mlli_dma_addr);
 573        }
 574
 575        dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
 576                sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 577                areq_ctx->assoclen, req->cryptlen);
 578
 579        dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
 580        if (req->src != req->dst) {
 581                dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 582                        sg_virt(req->dst));
 583                dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
 584                             DMA_BIDIRECTIONAL);
 585        }
 586        if (drvdata->coherent &&
 587            areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
 588            req->src == req->dst) {
  589                /* copy the MAC back from its temporary location to deal
  590                 * with possible data overwrite caused by a cache
  591                 * coherency problem.
  592                 */
 593                cc_copy_mac(dev, req, CC_SG_FROM_BUF);
 594        }
 595}
 596
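     /*
      * The ICV is considered fragmented when the data spans more than one SG
      * entry and the last entry holds fewer bytes than the ICV itself.
      */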
 597static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
 598                           u32 last_entry_data_size)
 599{
 600        return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 601}
 602
 603static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 604                            struct aead_request *req,
 605                            struct buffer_array *sg_data,
 606                            bool is_last, bool do_chain)
 607{
 608        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 609        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 610        struct device *dev = drvdata_to_dev(drvdata);
 611        gfp_t flags = cc_gfp_flags(&req->base);
 612        int rc = 0;
 613
 614        if (!req->iv) {
 615                areq_ctx->gen_ctx.iv_dma_addr = 0;
 616                areq_ctx->gen_ctx.iv = NULL;
 617                goto chain_iv_exit;
 618        }
 619
 620        areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
 621        if (!areq_ctx->gen_ctx.iv)
 622                return -ENOMEM;
 623
 624        areq_ctx->gen_ctx.iv_dma_addr =
 625                dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
 626                               DMA_BIDIRECTIONAL);
 627        if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 628                dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 629                        hw_iv_size, req->iv);
 630                kzfree(areq_ctx->gen_ctx.iv);
 631                areq_ctx->gen_ctx.iv = NULL;
 632                rc = -ENOMEM;
 633                goto chain_iv_exit;
 634        }
 635
 636        dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 637                hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
 638        // TODO: what about CTR?? ask Ron
 639        if (do_chain && areq_ctx->plaintext_authenticate_only) {
 640                struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 641                unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
 642                unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
 643                /* Chain to given list */
 644                cc_add_buffer_entry(dev, sg_data,
 645                                    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
 646                                    iv_size_to_authenc, is_last,
 647                                    &areq_ctx->assoc.mlli_nents);
 648                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 649        }
 650
 651chain_iv_exit:
 652        return rc;
 653}
 654
 655static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 656                               struct aead_request *req,
 657                               struct buffer_array *sg_data,
 658                               bool is_last, bool do_chain)
 659{
 660        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 661        int rc = 0;
 662        int mapped_nents = 0;
 663        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 664        unsigned int size_of_assoc = areq_ctx->assoclen;
 665        struct device *dev = drvdata_to_dev(drvdata);
 666
 667        if (areq_ctx->is_gcm4543)
 668                size_of_assoc += crypto_aead_ivsize(tfm);
 669
 670        if (!sg_data) {
 671                rc = -EINVAL;
 672                goto chain_assoc_exit;
 673        }
 674
 675        if (areq_ctx->assoclen == 0) {
 676                areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
 677                areq_ctx->assoc.nents = 0;
 678                areq_ctx->assoc.mlli_nents = 0;
 679                dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
 680                        cc_dma_buf_type(areq_ctx->assoc_buff_type),
 681                        areq_ctx->assoc.nents);
 682                goto chain_assoc_exit;
 683        }
 684
 685        mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
 686        if (mapped_nents < 0)
 687                return mapped_nents;
 688
 689        if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
 690                dev_err(dev, "Too many fragments. current %d max %d\n",
 691                        mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 692                return -ENOMEM;
 693        }
 694        areq_ctx->assoc.nents = mapped_nents;
 695
 696        /* in CCM case we have additional entry for
 697         * ccm header configurations
 698         */
 699        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
 700                if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
  701                        dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
 702                                (areq_ctx->assoc.nents + 1),
 703                                LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
 704                        rc = -ENOMEM;
 705                        goto chain_assoc_exit;
 706                }
 707        }
 708
 709        if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
 710                areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
 711        else
 712                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 713
 714        if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 715                dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
 716                        cc_dma_buf_type(areq_ctx->assoc_buff_type),
 717                        areq_ctx->assoc.nents);
 718                cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
 719                                areq_ctx->assoclen, 0, is_last,
 720                                &areq_ctx->assoc.mlli_nents);
 721                areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
 722        }
 723
 724chain_assoc_exit:
 725        return rc;
 726}
 727
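     /*
      * DLLI case: the ICV is contiguous, so derive its DMA and virtual
      * addresses from the last authsize bytes of the relevant SG entry.
      */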
 728static void cc_prepare_aead_data_dlli(struct aead_request *req,
 729                                      u32 *src_last_bytes, u32 *dst_last_bytes)
 730{
 731        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 732        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 733        unsigned int authsize = areq_ctx->req_authsize;
 734        struct scatterlist *sg;
 735        ssize_t offset;
 736
 737        areq_ctx->is_icv_fragmented = false;
 738
 739        if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 740                sg = areq_ctx->src_sgl;
 741                offset = *src_last_bytes - authsize;
 742        } else {
 743                sg = areq_ctx->dst_sgl;
 744                offset = *dst_last_bytes - authsize;
 745        }
 746
 747        areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
 748        areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 749}
 750
 751static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
 752                                      struct aead_request *req,
 753                                      struct buffer_array *sg_data,
 754                                      u32 *src_last_bytes, u32 *dst_last_bytes,
 755                                      bool is_last_table)
 756{
 757        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 758        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 759        unsigned int authsize = areq_ctx->req_authsize;
 760        struct device *dev = drvdata_to_dev(drvdata);
 761        struct scatterlist *sg;
 762
 763        if (req->src == req->dst) {
 764                /*INPLACE*/
 765                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 766                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 767                                areq_ctx->src_offset, is_last_table,
 768                                &areq_ctx->src.mlli_nents);
 769
 770                areq_ctx->is_icv_fragmented =
 771                        cc_is_icv_frag(areq_ctx->src.nents, authsize,
 772                                       *src_last_bytes);
 773
 774                if (areq_ctx->is_icv_fragmented) {
 775                        /* Backup happens only when ICV is fragmented, ICV
 776                         * verification is made by CPU compare in order to
 777                         * simplify MAC verification upon request completion
 778                         */
 779                        if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
  780                                /* On coherent platforms (e.g. ACP)
  781                                 * the ICV has already been copied
  782                                 * for any in-place decrypt
  783                                 * operation, so skip the copy here.
  784                                 */
 785                                if (!drvdata->coherent)
 786                                        cc_copy_mac(dev, req, CC_SG_TO_BUF);
 787
 788                                areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 789                        } else {
 790                                areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 791                                areq_ctx->icv_dma_addr =
 792                                        areq_ctx->mac_buf_dma_addr;
 793                        }
 794                } else { /* Contig. ICV */
 795                        sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  796                        /* Should handle the case where the sg is not contig. */
 797                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 798                                (*src_last_bytes - authsize);
 799                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 800                                (*src_last_bytes - authsize);
 801                }
 802
 803        } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
 804                /*NON-INPLACE and DECRYPT*/
 805                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 806                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 807                                areq_ctx->src_offset, is_last_table,
 808                                &areq_ctx->src.mlli_nents);
 809                cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 810                                areq_ctx->dst_sgl, areq_ctx->cryptlen,
 811                                areq_ctx->dst_offset, is_last_table,
 812                                &areq_ctx->dst.mlli_nents);
 813
 814                areq_ctx->is_icv_fragmented =
 815                        cc_is_icv_frag(areq_ctx->src.nents, authsize,
 816                                       *src_last_bytes);
  817                /* Backup happens only when the ICV is fragmented;
  818                 * ICV verification is made by CPU compare in order
  819                 * to simplify MAC verification upon request
  820                 * completion
  821                 */
 822                if (areq_ctx->is_icv_fragmented) {
 823                        cc_copy_mac(dev, req, CC_SG_TO_BUF);
 824                        areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
 825
 826                } else { /* Contig. ICV */
 827                        sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
  828                        /* Should handle the case where the sg is not contig. */
 829                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 830                                (*src_last_bytes - authsize);
 831                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 832                                (*src_last_bytes - authsize);
 833                }
 834
 835        } else {
 836                /*NON-INPLACE and ENCRYPT*/
 837                cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
 838                                areq_ctx->dst_sgl, areq_ctx->cryptlen,
 839                                areq_ctx->dst_offset, is_last_table,
 840                                &areq_ctx->dst.mlli_nents);
 841                cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
 842                                areq_ctx->src_sgl, areq_ctx->cryptlen,
 843                                areq_ctx->src_offset, is_last_table,
 844                                &areq_ctx->src.mlli_nents);
 845
 846                areq_ctx->is_icv_fragmented =
 847                        cc_is_icv_frag(areq_ctx->dst.nents, authsize,
 848                                       *dst_last_bytes);
 849
 850                if (!areq_ctx->is_icv_fragmented) {
 851                        sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
 852                        /* Contig. ICV */
 853                        areq_ctx->icv_dma_addr = sg_dma_address(sg) +
 854                                (*dst_last_bytes - authsize);
 855                        areq_ctx->icv_virt_addr = sg_virt(sg) +
 856                                (*dst_last_bytes - authsize);
 857                } else {
 858                        areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
 859                        areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
 860                }
 861        }
 862}
 863
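     /*
      * Walk the source (and, for non-inplace requests, destination) SG lists
      * past the associated data to find where the payload starts, then pick
      * DLLI or MLLI based on the resulting fragment counts.
      */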
 864static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 865                              struct aead_request *req,
 866                              struct buffer_array *sg_data,
 867                              bool is_last_table, bool do_chain)
 868{
 869        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 870        struct device *dev = drvdata_to_dev(drvdata);
 871        enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
 872        unsigned int authsize = areq_ctx->req_authsize;
 873        unsigned int src_last_bytes = 0, dst_last_bytes = 0;
 874        int rc = 0;
 875        u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 876        u32 offset = 0;
 877        /* non-inplace mode */
 878        unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
 879        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 880        u32 sg_index = 0;
 881        bool is_gcm4543 = areq_ctx->is_gcm4543;
 882        u32 size_to_skip = areq_ctx->assoclen;
 883        struct scatterlist *sgl;
 884
 885        if (is_gcm4543)
 886                size_to_skip += crypto_aead_ivsize(tfm);
 887
 888        offset = size_to_skip;
 889
 890        if (!sg_data)
 891                return -EINVAL;
 892
 893        areq_ctx->src_sgl = req->src;
 894        areq_ctx->dst_sgl = req->dst;
 895
 896        if (is_gcm4543)
 897                size_for_map += crypto_aead_ivsize(tfm);
 898
 899        size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 900                        authsize : 0;
 901        src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
 902                                            &src_last_bytes);
 903        sg_index = areq_ctx->src_sgl->length;
  904        /* check where the data starts */
 905        while (sg_index <= size_to_skip) {
 906                src_mapped_nents--;
 907                offset -= areq_ctx->src_sgl->length;
 908                sgl = sg_next(areq_ctx->src_sgl);
 909                if (!sgl)
 910                        break;
 911                areq_ctx->src_sgl = sgl;
 912                sg_index += areq_ctx->src_sgl->length;
 913        }
 914        if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 915                dev_err(dev, "Too many fragments. current %d max %d\n",
 916                        src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 917                return -ENOMEM;
 918        }
 919
 920        areq_ctx->src.nents = src_mapped_nents;
 921
 922        areq_ctx->src_offset = offset;
 923
 924        if (req->src != req->dst) {
 925                size_for_map = areq_ctx->assoclen + req->cryptlen;
 926                size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 927                                authsize : 0;
 928                if (is_gcm4543)
 929                        size_for_map += crypto_aead_ivsize(tfm);
 930
 931                rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 932                               &areq_ctx->dst.nents,
 933                               LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 934                               &dst_mapped_nents);
 935                if (rc)
 936                        goto chain_data_exit;
 937        }
 938
 939        dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
 940                                            &dst_last_bytes);
 941        sg_index = areq_ctx->dst_sgl->length;
 942        offset = size_to_skip;
 943
  944        /* check where the data starts */
 945        while (sg_index <= size_to_skip) {
 946                dst_mapped_nents--;
 947                offset -= areq_ctx->dst_sgl->length;
 948                sgl = sg_next(areq_ctx->dst_sgl);
 949                if (!sgl)
 950                        break;
 951                areq_ctx->dst_sgl = sgl;
 952                sg_index += areq_ctx->dst_sgl->length;
 953        }
 954        if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
 955                dev_err(dev, "Too many fragments. current %d max %d\n",
 956                        dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
 957                return -ENOMEM;
 958        }
 959        areq_ctx->dst.nents = dst_mapped_nents;
 960        areq_ctx->dst_offset = offset;
 961        if (src_mapped_nents > 1 ||
 962            dst_mapped_nents  > 1 ||
 963            do_chain) {
 964                areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
 965                cc_prepare_aead_data_mlli(drvdata, req, sg_data,
 966                                          &src_last_bytes, &dst_last_bytes,
 967                                          is_last_table);
 968        } else {
 969                areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
 970                cc_prepare_aead_data_dlli(req, &src_last_bytes,
 971                                          &dst_last_bytes);
 972        }
 973
 974chain_data_exit:
 975        return rc;
 976}
 977
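     /*
      * Lay out the assoc, src and dst MLLI tables back to back in MLLI SRAM;
      * for non-single-pass flows the data table entry counts are also folded
      * into the assoc count, presumably so a single descriptor can reference
      * the chained tables.
      */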
 978static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
 979                                      struct aead_request *req)
 980{
 981        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 982        u32 curr_mlli_size = 0;
 983
 984        if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
 985                areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
 986                curr_mlli_size = areq_ctx->assoc.mlli_nents *
 987                                                LLI_ENTRY_BYTE_SIZE;
 988        }
 989
 990        if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
  991                /* In-place case: dst nents equal src nents */
 992                if (req->src == req->dst) {
 993                        areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
 994                        areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
 995                                                                curr_mlli_size;
 996                        areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
 997                        if (!areq_ctx->is_single_pass)
 998                                areq_ctx->assoc.mlli_nents +=
 999                                        areq_ctx->src.mlli_nents;
1000                } else {
1001                        if (areq_ctx->gen_ctx.op_type ==
1002                                        DRV_CRYPTO_DIRECTION_DECRYPT) {
1003                                areq_ctx->src.sram_addr =
1004                                                drvdata->mlli_sram_addr +
1005                                                                curr_mlli_size;
1006                                areq_ctx->dst.sram_addr =
1007                                                areq_ctx->src.sram_addr +
1008                                                areq_ctx->src.mlli_nents *
1009                                                LLI_ENTRY_BYTE_SIZE;
1010                                if (!areq_ctx->is_single_pass)
1011                                        areq_ctx->assoc.mlli_nents +=
1012                                                areq_ctx->src.mlli_nents;
1013                        } else {
1014                                areq_ctx->dst.sram_addr =
1015                                                drvdata->mlli_sram_addr +
1016                                                                curr_mlli_size;
1017                                areq_ctx->src.sram_addr =
1018                                                areq_ctx->dst.sram_addr +
1019                                                areq_ctx->dst.mlli_nents *
1020                                                LLI_ENTRY_BYTE_SIZE;
1021                                if (!areq_ctx->is_single_pass)
1022                                        areq_ctx->assoc.mlli_nents +=
1023                                                areq_ctx->dst.mlli_nents;
1024                        }
1025                }
1026        }
1027}
1028
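     /*
      * Map every DMA resource an AEAD request needs: the MAC buffer, the
      * mode-specific blocks (CCM config, GCM hkey and length/IV blocks),
      * the source/destination data, and finally the MLLI table(s) when any
      * part requires MLLI.
      */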
1029int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1030{
1031        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1032        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1033        struct device *dev = drvdata_to_dev(drvdata);
1034        struct buffer_array sg_data;
1035        unsigned int authsize = areq_ctx->req_authsize;
1036        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1037        int rc = 0;
1038        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1039        bool is_gcm4543 = areq_ctx->is_gcm4543;
1040        dma_addr_t dma_addr;
1041        u32 mapped_nents = 0;
1042        u32 dummy = 0; /*used for the assoc data fragments */
1043        u32 size_to_map = 0;
1044        gfp_t flags = cc_gfp_flags(&req->base);
1045
1046        mlli_params->curr_pool = NULL;
1047        sg_data.num_of_buffers = 0;
1048
 1049        /* copy the MAC to a temporary location to deal with possible
 1050         * data overwrite caused by a cache coherency problem.
 1051         */
1052        if (drvdata->coherent &&
1053            areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1054            req->src == req->dst)
1055                cc_copy_mac(dev, req, CC_SG_TO_BUF);
1056
 1057        /* calculate the cipher data size; in decrypt, exclude the ICV */
1058        areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1059                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1060                                req->cryptlen :
1061                                (req->cryptlen - authsize);
1062
1063        dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1064                                  DMA_BIDIRECTIONAL);
1065        if (dma_mapping_error(dev, dma_addr)) {
1066                dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1067                        MAX_MAC_SIZE, areq_ctx->mac_buf);
1068                rc = -ENOMEM;
1069                goto aead_map_failure;
1070        }
1071        areq_ctx->mac_buf_dma_addr = dma_addr;
1072
1073        if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1074                void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1075
1076                dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1077                                          DMA_TO_DEVICE);
1078
1079                if (dma_mapping_error(dev, dma_addr)) {
1080                        dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1081                                AES_BLOCK_SIZE, addr);
1082                        areq_ctx->ccm_iv0_dma_addr = 0;
1083                        rc = -ENOMEM;
1084                        goto aead_map_failure;
1085                }
1086                areq_ctx->ccm_iv0_dma_addr = dma_addr;
1087
1088                rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1089                                          &sg_data, areq_ctx->assoclen);
1090                if (rc)
1091                        goto aead_map_failure;
1092        }
1093
1094        if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1095                dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1096                                          DMA_BIDIRECTIONAL);
1097                if (dma_mapping_error(dev, dma_addr)) {
1098                        dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1099                                AES_BLOCK_SIZE, areq_ctx->hkey);
1100                        rc = -ENOMEM;
1101                        goto aead_map_failure;
1102                }
1103                areq_ctx->hkey_dma_addr = dma_addr;
1104
1105                dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1106                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1107                if (dma_mapping_error(dev, dma_addr)) {
1108                        dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1109                                AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1110                        rc = -ENOMEM;
1111                        goto aead_map_failure;
1112                }
1113                areq_ctx->gcm_block_len_dma_addr = dma_addr;
1114
1115                dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1116                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1117
1118                if (dma_mapping_error(dev, dma_addr)) {
1119                        dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1120                                AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1121                        areq_ctx->gcm_iv_inc1_dma_addr = 0;
1122                        rc = -ENOMEM;
1123                        goto aead_map_failure;
1124                }
1125                areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1126
1127                dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1128                                          AES_BLOCK_SIZE, DMA_TO_DEVICE);
1129
1130                if (dma_mapping_error(dev, dma_addr)) {
1131                        dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1132                                AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1133                        areq_ctx->gcm_iv_inc2_dma_addr = 0;
1134                        rc = -ENOMEM;
1135                        goto aead_map_failure;
1136                }
1137                areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1138        }
1139
1140        size_to_map = req->cryptlen + areq_ctx->assoclen;
1141        if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1142                size_to_map += authsize;
1143
1144        if (is_gcm4543)
1145                size_to_map += crypto_aead_ivsize(tfm);
1146        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1147                       &areq_ctx->src.nents,
1148                       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1149                        LLI_MAX_NUM_OF_DATA_ENTRIES),
1150                       &dummy, &mapped_nents);
1151        if (rc)
1152                goto aead_map_failure;
1153
1154        if (areq_ctx->is_single_pass) {
1155                /*
1156                 * Create MLLI table for:
1157                 *   (1) Assoc. data
1158                 *   (2) Src/Dst SGLs
 1159                 *   Note: IV is a contiguous buffer (not an SGL)
1160                 */
1161                rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1162                if (rc)
1163                        goto aead_map_failure;
1164                rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1165                if (rc)
1166                        goto aead_map_failure;
1167                rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1168                if (rc)
1169                        goto aead_map_failure;
1170        } else { /* DOUBLE-PASS flow */
1171                /*
1172                 * Prepare MLLI table(s) in this order:
1173                 *
1174                 * If ENCRYPT/DECRYPT (inplace):
1175                 *   (1) MLLI table for assoc
1176                 *   (2) IV entry (chained right after end of assoc)
1177                 *   (3) MLLI for src/dst (inplace operation)
1178                 *
1179                 * If ENCRYPT (non-inplace)
1180                 *   (1) MLLI table for assoc
1181                 *   (2) IV entry (chained right after end of assoc)
1182                 *   (3) MLLI for dst
1183                 *   (4) MLLI for src
1184                 *
1185                 * If DECRYPT (non-inplace)
1186                 *   (1) MLLI table for assoc
1187                 *   (2) IV entry (chained right after end of assoc)
1188                 *   (3) MLLI for src
1189                 *   (4) MLLI for dst
1190                 */
1191                rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1192                if (rc)
1193                        goto aead_map_failure;
1194                rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1195                if (rc)
1196                        goto aead_map_failure;
1197                rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1198                if (rc)
1199                        goto aead_map_failure;
1200        }
1201
 1202        /* MLLI support - start building the MLLI according to the
 1203         * above results
 1204         */
1205        if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1206            areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1207                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1208                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1209                if (rc)
1210                        goto aead_map_failure;
1211
1212                cc_update_aead_mlli_nents(drvdata, req);
1213                dev_dbg(dev, "assoc params mn %d\n",
1214                        areq_ctx->assoc.mlli_nents);
1215                dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1216                dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1217        }
1218        return 0;
1219
1220aead_map_failure:
1221        cc_unmap_aead_request(dev, req);
1222        return rc;
1223}
1224
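     /*
      * Map the data for a final hash operation: any bytes pending in the
      * context buffer are mapped first, then the new source data; an MLLI
      * table is generated when the data does not fit a single DLLI entry.
      */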
1225int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1226                              struct scatterlist *src, unsigned int nbytes,
1227                              bool do_update, gfp_t flags)
1228{
1229        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1230        struct device *dev = drvdata_to_dev(drvdata);
1231        u8 *curr_buff = cc_hash_buf(areq_ctx);
1232        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1233        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1234        struct buffer_array sg_data;
1235        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1236        int rc = 0;
1237        u32 dummy = 0;
1238        u32 mapped_nents = 0;
1239
1240        dev_dbg(dev, "final params: curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1241                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1242        /* Init the type of the dma buffer */
1243        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1244        mlli_params->curr_pool = NULL;
1245        sg_data.num_of_buffers = 0;
1246        areq_ctx->in_nents = 0;
1247
1248        if (nbytes == 0 && *curr_buff_cnt == 0) {
1249                /* nothing to do */
1250                return 0;
1251        }
1252
1253        /* TODO: copy the data when the buffer is large enough for the operation */
1254        /* map the previous buffer */
1255        if (*curr_buff_cnt) {
1256                rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1257                                     &sg_data);
1258                if (rc)
1259                        return rc;
1260        }
1261
1262        if (src && nbytes > 0 && do_update) {
1263                rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1264                               &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1265                               &dummy, &mapped_nents);
1266                if (rc)
1267                        goto unmap_curr_buff;
1268                if (src && mapped_nents == 1 &&
1269                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1270                        memcpy(areq_ctx->buff_sg, src,
1271                               sizeof(struct scatterlist));
1272                        areq_ctx->buff_sg->length = nbytes;
1273                        areq_ctx->curr_sg = areq_ctx->buff_sg;
1274                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1275                } else {
1276                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1277                }
1278        }
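        /*
         * At this point data_dma_buf_type reflects the mapping result: a
         * single mapped entry with no previously buffered data can be fed
         * to the HW directly (DLLI); anything else needs the MLLI table
         * built below.
         */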
1279
1280        /* Build MLLI */
1281        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1282                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1283                /* add the src data to the sg_data */
1284                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1285                                0, true, &areq_ctx->mlli_nents);
1286                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1287                if (rc)
1288                        goto fail_unmap_din;
1289        }
1290        /* change the buffer index for the unmap function */
1291        areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1292        dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1293                cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1294        return 0;
1295
1296fail_unmap_din:
1297        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1298
1299unmap_curr_buff:
1300        if (*curr_buff_cnt)
1301                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1302
1303        return rc;
1304}
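/*
 * Usage sketch (hypothetical caller, simplified): the hash code is expected
 * to pair this mapping with cc_unmap_hash_request(), passing do_revert when
 * the request never reached the HW:
 *
 *	rc = cc_map_hash_request_final(drvdata, state, req->src,
 *				       req->nbytes, true, flags);
 *	if (rc)
 *		return rc;
 *	rc = ...queue the HW descriptors...;
 *	if (rc)
 *		cc_unmap_hash_request(dev, state, req->src, true);
 */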
1305
1306int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1307                               struct scatterlist *src, unsigned int nbytes,
1308                               unsigned int block_size, gfp_t flags)
1309{
1310        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1311        struct device *dev = drvdata_to_dev(drvdata);
1312        u8 *curr_buff = cc_hash_buf(areq_ctx);
1313        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1314        u8 *next_buff = cc_next_buf(areq_ctx);
1315        u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1316        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1317        unsigned int update_data_len;
1318        u32 total_in_len = nbytes + *curr_buff_cnt;
1319        struct buffer_array sg_data;
1320        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1321        unsigned int swap_index = 0;
1322        int rc = 0;
1323        u32 dummy = 0;
1324        u32 mapped_nents = 0;
1325
1326        dev_dbg(dev, " update params: curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1327                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1328        /* Init the type of the dma buffer */
1329        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1330        mlli_params->curr_pool = NULL;
1331        areq_ctx->curr_sg = NULL;
1332        sg_data.num_of_buffers = 0;
1333        areq_ctx->in_nents = 0;
1334
1335        if (total_in_len < block_size) {
1336                dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1337                        curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1338                areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
1339                sg_copy_to_buffer(src, areq_ctx->in_nents,
1340                                  &curr_buff[*curr_buff_cnt], nbytes);
1341                *curr_buff_cnt += nbytes;
1342                return 1;
1343        }
1344
1345        /* Calculate the residue size */
1346        *next_buff_cnt = total_in_len & (block_size - 1);
1347        /* update data len */
1348        update_data_len = total_in_len - *next_buff_cnt;
1349
1350        dev_dbg(dev, " temp length: *next_buff_cnt=0x%X update_data_len=0x%X\n",
1351                *next_buff_cnt, update_data_len);
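        /*
         * Example: with block_size = 64, *curr_buff_cnt = 10 and
         * nbytes = 100, total_in_len is 110, so *next_buff_cnt = 46
         * (110 & 63) is carried over and update_data_len = 64 is hashed
         * now. The mask only works because hash block sizes are powers
         * of two.
         */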
1352
1353        /* Copy the new residue to next buffer */
1354        if (*next_buff_cnt) {
1355                dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1356                        next_buff, (update_data_len - *curr_buff_cnt),
1357                        *next_buff_cnt);
1358                cc_copy_sg_portion(dev, next_buff, src,
1359                                   (update_data_len - *curr_buff_cnt),
1360                                   nbytes, CC_SG_TO_BUF);
1361                /* change the buffer index for next operation */
1362                swap_index = 1;
1363        }
1364
1365        if (*curr_buff_cnt) {
1366                rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1367                                     &sg_data);
1368                if (rc)
1369                        return rc;
1370                /* change the buffer index for next operation */
1371                swap_index = 1;
1372        }
1373
1374        if (update_data_len > *curr_buff_cnt) {
1375                rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1376                               DMA_TO_DEVICE, &areq_ctx->in_nents,
1377                               LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1378                               &mapped_nents);
1379                if (rc)
1380                        goto unmap_curr_buff;
1381                if (mapped_nents == 1 &&
1382                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1383                        /* only one entry in the SG and no previous data */
1384                        memcpy(areq_ctx->buff_sg, src,
1385                               sizeof(struct scatterlist));
1386                        areq_ctx->buff_sg->length = update_data_len;
1387                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1388                        areq_ctx->curr_sg = areq_ctx->buff_sg;
1389                } else {
1390                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1391                }
1392        }
1393
1394        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1395                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1396                /* add the src data to the sg_data */
1397                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1398                                (update_data_len - *curr_buff_cnt), 0, true,
1399                                &areq_ctx->mlli_nents);
1400                rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1401                if (rc)
1402                        goto fail_unmap_din;
1403        }
1404        areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1405
1406        return 0;
1407
1408fail_unmap_din:
1409        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1410
1411unmap_curr_buff:
1412        if (*curr_buff_cnt)
1413                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1414
1415        return rc;
1416}
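/*
 * Note the asymmetric return convention above: 1 means the input was absorbed
 * into the context buffer and nothing needs to be sent to the HW, 0 means
 * buffers are mapped and an update operation should be issued, and a negative
 * value is an error with all mappings undone.
 */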
1417
1418void cc_unmap_hash_request(struct device *dev, void *ctx,
1419                           struct scatterlist *src, bool do_revert)
1420{
1421        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1422        u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1423
1424        /* In case a pool was set, a table was
1425         * allocated and should be released
1426         */
1427        if (areq_ctx->mlli_params.curr_pool) {
1428                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1429                        &areq_ctx->mlli_params.mlli_dma_addr,
1430                        areq_ctx->mlli_params.mlli_virt_addr);
1431                dma_pool_free(areq_ctx->mlli_params.curr_pool,
1432                              areq_ctx->mlli_params.mlli_virt_addr,
1433                              areq_ctx->mlli_params.mlli_dma_addr);
1434        }
1435
1436        if (src && areq_ctx->in_nents) {
1437                dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1438                        sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1439                dma_unmap_sg(dev, src,
1440                             areq_ctx->in_nents, DMA_TO_DEVICE);
1441        }
1442
1443        if (*prev_len) {
1444                dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1445                        sg_virt(areq_ctx->buff_sg),
1446                        &sg_dma_address(areq_ctx->buff_sg),
1447                        sg_dma_len(areq_ctx->buff_sg));
1448                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1449                if (!do_revert) {
1450                        /* clean the previous data length for update
1451                         * operation
1452                         */
1453                        *prev_len = 0;
1454                } else {
1455                        areq_ctx->buff_index ^= 1;
1456                }
1457        }
1458}
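/*
 * do_revert distinguishes a completed request from an aborted one: on
 * completion the stashed next-buffer length is consumed (*prev_len is
 * cleared), while on abort the buffer index is flipped back so a retry
 * starts from the same state the map call saw.
 */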
1459
1460int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1461{
1462        struct buff_mgr_handle *buff_mgr_handle;
1463        struct device *dev = drvdata_to_dev(drvdata);
1464
1465        buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1466        if (!buff_mgr_handle)
1467                return -ENOMEM;
1468
1469        drvdata->buff_mgr_handle = buff_mgr_handle;
1470
1471        buff_mgr_handle->mlli_buffs_pool =
1472                dma_pool_create("dx_single_mlli_tables", dev,
1473                                MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1474                                LLI_ENTRY_BYTE_SIZE,
1475                                MLLI_TABLE_MIN_ALIGNMENT, 0);
1476
1477        if (!buff_mgr_handle->mlli_buffs_pool)
1478                goto error;
1479
1480        return 0;
1481
1482error:
1483        cc_buffer_mgr_fini(drvdata);
1484        return -ENOMEM;
1485}
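/*
 * Lifecycle sketch (hypothetical probe path, for illustration): the init
 * function is called once per device and allocates the DMA pool that all
 * MLLI tables in this file are carved from:
 *
 *	rc = cc_buffer_mgr_init(drvdata);
 *	if (rc)
 *		return rc;
 *	...
 *	cc_buffer_mgr_fini(drvdata);	(on remove or on a later init failure)
 *
 * cc_buffer_mgr_fini() tolerates a partially initialized handle, which is
 * what the error label above relies on.
 */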
1486
1487int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1488{
1489        struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1490
1491        if (buff_mgr_handle) {
1492                dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1493                kfree(drvdata->buff_mgr_handle);
1494                drvdata->buff_mgr_handle = NULL;
1495        }
1496        return 0;
1497}
1498