linux/drivers/crypto/marvell/hash.c
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

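/*
 * MD5/SHA padding refresher: the message is followed by a 0x80 byte,
 * then zero bytes until the length is congruent to 56 mod 64, and
 * finally the message length in bits stored on 8 bytes (little endian
 * for MD5, big endian for SHA). Worked example: for creq->len == 100,
 * index = 100 % 64 = 36 and padlen = 56 - 36 = 20, so the helpers
 * below emit a 20-byte pad plus the 8-byte length, growing the
 * message to exactly two 64-byte blocks.
 */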
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

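/*
 * Standard (non-DMA) processing: the CPU copies the operation
 * descriptor and the data into the engine SRAM, programs the initial
 * digest state through the IVDIG registers, and kicks the accelerator
 * one SRAM payload at a time. Each pass handles at most
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes; leftover sub-block bytes are kept
 * in creq->cache for the next pass.
 */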
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

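        /*
         * The engine tracks hashing state across operations through the
         * fragment mode: FIRST_FRAG starts a hash, MID_FRAG continues
         * one, LAST_FRAG closes it and NOT_FRAG does everything in a
         * single operation. When this is the final request and all the
         * data fits, the pending mode is promoted (FIRST -> NOT,
         * MID -> LAST) so the engine performs the final padding itself.
         */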
        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
        for (i = 0; i < digsize / 4; i++)
                creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

        if (creq->last_req) {
                /*
                 * The hardware's MD5 digest is in little endian format,
                 * but the SHA digests are in big endian format.
                 */
                if (creq->algo_le) {
                        __le32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_le32(creq->state[i]);
                } else {
                        __be32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_be32(creq->state[i]);
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
                               struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

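/*
 * Partial updates smaller than a block cannot be fed to the engine:
 * the data is accumulated in creq->cache instead and prepended to the
 * next chunk. Returns true when the whole request was absorbed by the
 * cache and no engine operation is needed.
 */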
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

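/*
 * Each fragment handed to the TDMA chain gets its own copy of the
 * operation descriptor with the fragment length filled in, followed by
 * a dummy descriptor that triggers the crypto operation. Once the
 * first fragment has been queued, the template is flipped to MID_FRAG
 * for the fragments that follow.
 */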
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

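/*
 * Closing a DMA-backed hash: if the total length fits in the
 * descriptor's MAC_SRC_TOTAL_LEN field and data is pending, the engine
 * can generate the padding itself. Otherwise the padding is built in
 * software (mv_cesa_ahash_pad_req) and streamed into the SRAM as one
 * or two extra "mid" fragments, split when it does not fit in the
 * space left after the last data fragment.
 */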
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

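/*
 * Build the whole TDMA chain for one update: map the source
 * scatterlist, replay the cached left-over bytes into SRAM, then
 * interleave data transfers and operation descriptors one SRAM
 * payload at a time. The final operation is only emitted here for the
 * last request; otherwise the trailing sub-block bytes are left in
 * the cache for the next update.
 */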
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation.  Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        if (op) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
                                       CESA_TDMA_BREAK_CHAIN);

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

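/*
 * crypto_ahash entry points: update() only accumulates data, final()
 * closes the hash with no extra data, and finup() does both in one
 * call. creq->len tracks the total number of bytes fed to the hash so
 * the trailer can encode the message length in bits.
 */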
static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

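/*
 * The exported state consists of the partial digest words, the total
 * byte count and the not-yet-hashed tail of the last block, matching
 * the layout of the generic md5/sha1/sha256 state structures. On
 * import, do_div() yields the byte count modulo the block size, i.e.
 * the number of cached bytes to restore.
 */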
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

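/*
 * Helper used at setkey time: run one block (the XORed ipad or opad)
 * through the underlying ahash and export the resulting internal
 * state. That state becomes the precomputed IV loaded into the engine
 * for every HMAC operation, so the pad block does not have to be
 * rehashed on each request.
 */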
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

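/*
 * Prepare the HMAC inner and outer pads per RFC 2104:
 * ipad = K' ^ 0x36..36 and opad = K' ^ 0x5c..5c, where K' is the key
 * zero-padded to the block size, after being digested first if it is
 * longer than one block.
 */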
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Zero the key duplicate to avoid leaking key material. */
                memset(keydup, 0, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= 0x36;
                opad[i] ^= 0x5c;
        }

        return 0;
}

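/*
 * Compute the precomputed inner (istate) and outer (ostate) hash
 * states for an HMAC key by running the ipad and opad blocks through
 * the underlying mv-* hash and exporting the intermediate states.
 */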
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};