linux/drivers/crypto/marvell/hash.c
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

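/*
 * MD5/SHA-style padding: one 0x80 byte, zeroes up to 56 mod 64, then the
 * total message length in bits as a 64-bit value (little endian for MD5,
 * big endian for SHA). Worked example: for a 3-byte message, index = 3,
 * padlen = 56 - 3 = 53, and mv_cesa_ahash_pad_req() emits 53 + 8 = 61
 * trailer bytes, completing a single 64-byte block.
 */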
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

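/*
 * Standard (non-TDMA) step: the CPU copies the operation descriptor and
 * any cached partial block into the engine SRAM, seeds the IVDIG
 * registers with the current digest state on the first pass, then feeds
 * up to CESA_SA_SRAM_PAYLOAD_SIZE bytes per pass before kicking the
 * accelerator. The fragment mode (FIRST/MID/LAST/NOT_FRAG) tells the
 * engine whether this pass is a partial update or one it should pad and
 * finalize itself.
 */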
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i],
                                       engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

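/*
 * A standard request may need several SRAM-sized passes; keep returning
 * -EINPROGRESS until the copy offset has consumed everything except the
 * bytes pushed back into the cache.
 */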
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *base = &creq->base;

        /* We must explicitly set the digest state. */
        if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
                struct mv_cesa_engine *engine = base->engine;
                int i;

                /* Set the hash state in the IVDIG regs. */
                for (i = 0; i < ARRAY_SIZE(creq->state); i++)
                        writel_relaxed(creq->state[i], engine->regs +
                                       CESA_IVDIG(i));
        }

        mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
            (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
             CESA_TDMA_RESULT) {
                __le32 *data = NULL;

                /*
                 * Result is already in the correct endianness when the SA
                 * is used.
                 */
                data = creq->base.chain.last->op->ctx.hash.hash;
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = le32_to_cpu(data[i]);

                memcpy(ahashreq->result, data, digsize);
        } else {
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = readl_relaxed(engine->regs +
                                                       CESA_IVDIG(i));
                if (creq->last_req) {
                        /*
                         * Hardware's MD5 digest is in little endian format,
                         * but SHA is in big endian format.
                         */
                        if (creq->algo_le) {
                                __le32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_le32(creq->state[i]);
                        } else {
                                __be32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_be32(creq->state[i]);
                        }
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
                               struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

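/*
 * Small updates are buffered: as long as the accumulated data stays
 * below one hash block and this is not the final request, the bytes are
 * simply appended to creq->cache and no hardware operation is queued.
 */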
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

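/*
 * A "fragment" in the TDMA chain is an operation descriptor followed by
 * a dummy launch descriptor that actually triggers the engine. Once the
 * first fragment has been emitted, the template is flipped from
 * FIRST_FRAG to MID_FRAG so later fragments continue the running hash.
 */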
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                ret = mv_cesa_dma_add_result_op(chain,
                                                CESA_SA_CFG_SRAM_OFFSET,
                                                CESA_SA_DATA_SRAM_OFFSET,
                                                CESA_TDMA_SRC_IN_SRAM, flags);
                if (ret)
                        return ERR_PTR(-ENOMEM);

                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
                                          frag_len + len, flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

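/*
 * Build the complete TDMA chain for one request. The resulting chain is
 * roughly:
 *
 *   [cache -> SRAM copy] -> ([data transfers] -> [op + launch])... ->
 *   [last op, padded by the engine or by software] -> [dummy end desc]
 *
 * so the whole request can run without CPU intervention.
 */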
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        bool set_state = false;
        int ret;
        u32 type;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
                set_state = true;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain,
                                                  &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        /*
         * If results are copied via DMA, this means that this
         * request can be directly processed by the engine,
         * without partial updates. So we can chain it at the
         * DMA level with other requests.
         */
        type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

        if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

        if (set_state) {
                /*
                 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
                 * let the step logic know that the IVDIG registers should be
                 * explicitly set before launching a TDMA chain.
                 */
                basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
        }

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

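/*
 * Common entry point for update/final/finup: count the source
 * scatterlist entries, try to satisfy the request from the cache alone,
 * and otherwise build a TDMA chain when the engine supports it; engines
 * without a TDMA unit fall back to the CPU-driven standard path.
 */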
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

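/*
 * Export/import the partial state in the same layout the generic
 * md5/sha drivers use (digest words, byte count, partial block), so a
 * request can be serialized and resumed later. A hypothetical caller
 * sketch (error handling omitted):
 *
 *   struct md5_state st;
 *
 *   crypto_ahash_export(req, &st);      // ends up in mv_cesa_md5_export()
 *   ...
 *   crypto_ahash_import(new_req, &st);  // ends up in mv_cesa_md5_import()
 */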
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

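/*
 * MD5 produces a little-endian digest while SHA-1/SHA-256 are big
 * endian; the algo_le flag passed to mv_cesa_ahash_init() records this
 * so padding and the final result are byte-swapped correctly.
 */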
static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

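/*
 * HMAC support: the inner/outer intermediate states are precomputed
 * synchronously at setkey() time, using this driver's own base hash
 * implementations, and later loaded into the engine as IVs.
 */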
struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

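/*
 * Compute the HMAC pads per RFC 2104: a key longer than one block is
 * first hashed, the (possibly hashed) key is zero-padded to the block
 * size, and the result is XORed with 0x36 (ipad) and 0x5c (opad).
 */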
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                memset(keydup, 0, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= 0x36;
                opad[i] ^= 0x5c;
        }

        return 0;
}

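/*
 * Hash one block of ipad and one block of opad with the named base
 * algorithm and export the two intermediate states; these become the
 * istate/ostate values that the per-algorithm setkey() handlers store
 * in ctx->iv.
 */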
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};