linux/drivers/crypto/marvell/hash.c
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

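/*
 * Compute the length of the padding that must follow the 0x80 marker
 * byte. MD5, SHA-1 and SHA-256 all pad the message to 56 mod 64 bytes
 * so that the trailing 64-bit bit-length fits in the final 64-byte
 * block.
 */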
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

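/*
 * Write the final padding for the current request into @buf: a 0x80
 * marker byte, zeroes up to 56 mod 64, then the total message length
 * in bits as a 64-bit value (little-endian for MD5, big-endian for the
 * SHA algorithms). Returns the number of padding bytes written.
 */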
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

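/*
 * Standard (CPU-driven) processing step: copy any cached left-over
 * bytes into the engine SRAM, append up to CESA_SA_SRAM_PAYLOAD_SIZE
 * bytes gathered from the source scatterlist, pick the appropriate
 * fragment mode, write the operation descriptor to the SRAM and start
 * the accelerator.
 */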
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

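/*
 * A standard request stays -EINPROGRESS until every source byte that
 * is not kept in the cache has been pushed through the SRAM.
 */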
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma.base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/*
		 * The hardware's MD5 digest is in little-endian format, but
		 * the SHA digests are in big-endian format.
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	return ret;
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}

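/*
 * Append an operation descriptor covering @frag_len bytes to the TDMA
 * chain, plus the dummy descriptor that launches it. Once a first
 * fragment has been emitted, the template is switched to "mid"
 * fragment mode so that later operations continue the partial hash
 * instead of restarting it.
 */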
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

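/*
 * Build the TDMA descriptor chain for a DMA-backed request: map the
 * source scatterlist, transfer the cached left-over bytes first, then
 * interleave data transfers and operation descriptors so that the
 * engine processes one SRAM payload worth of data at a time. The
 * closing operation is only emitted for the last request; otherwise
 * the tail bytes are kept in the cache for the next update.
 */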
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&dreq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation.  Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
		return creq->src_nents;
	}

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}

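/*
 * The update/final/finup entry points below only differ in how they
 * account for the new data and whether they flag the request as the
 * last one: updates smaller than a block are simply accumulated in the
 * cache, anything else is queued on the engine.
 */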
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

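/*
 * Export/import the generic hash state (partial digest, total length
 * and cached partial block) so that a request can be suspended and
 * later resumed, matching the md5/sha1/sha256 state layouts used by
 * the software implementations.
 */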
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	if (creq->cache)
		memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

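/* Completion callback used to run key-setup hash requests synchronously. */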
static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

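/*
 * Hash a single block (the XORed ipad or opad) with the underlying
 * ahash and export the resulting partial state: this is what later
 * gets loaded into the engine as the HMAC inner/outer IV.
 */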
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

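/*
 * Prepare the HMAC key pads as described in RFC 2104: the key (hashed
 * first if it is longer than a block) is zero-padded to the block
 * size, then XORed with 0x36 to produce ipad and with 0x5c to produce
 * opad.
 */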
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

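/*
 * Compute the HMAC inner and outer intermediate states for @key using
 * this driver's base hash (looked up by @hash_alg_name), so that the
 * expensive key preprocessing only runs once per setkey() call.
 */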
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};