linux/drivers/crypto/marvell/hash.c
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

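/*
 * Compute the number of padding bytes (the 0x80 marker plus zero fill)
 * needed to bring the message up to 56 mod 64 bytes, leaving room for
 * the 64-bit length field that completes a 64-byte hash block. For
 * example, creq->len == 120 gives index = 120 & 63 = 56, so
 * padlen = 64 + 56 - 56 = 64.
 */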
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

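/*
 * Write the MD5/SHA trailer into @buf: the 0x80 end-of-message marker,
 * zero padding, and the total message length in bits, little endian for
 * MD5 and big endian for SHA. Returns the number of trailer bytes written.
 */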
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);
                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);
                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

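/*
 * One step of a standard (CPU-driven) hash request: load the IVDIG state
 * registers on the first step, copy the cached left-over bytes and as
 * much new source data as fits into the engine SRAM, pick the fragment
 * mode (first/mid/last/not fragmented), append the padding in SRAM when
 * the request can be completed, then kick the accelerator.
 */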
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *base = &creq->base;

        /* We must explicitly set the digest state. */
        if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
                struct mv_cesa_engine *engine = base->engine;
                int i;

                /* Set the hash state in the IVDIG regs. */
                for (i = 0; i < ARRAY_SIZE(creq->state); i++)
                        writel_relaxed(creq->state[i], engine->regs +
                                       CESA_IVDIG(i));
        }

        mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
            (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
                __le32 *data = NULL;

                /*
                 * Result is already in the correct endianness when the SA
                 * is used.
                 */
                data = creq->base.chain.last->op->ctx.hash.hash;
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = cpu_to_le32(data[i]);

                memcpy(ahashreq->result, data, digsize);
        } else {
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = readl_relaxed(engine->regs +
                                                       CESA_IVDIG(i));
                if (creq->last_req) {
                        /*
                         * Hardware's MD5 digest is in little endian format,
                         * but SHA is in big endian format.
                         */
                        if (creq->algo_le) {
                                __le32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_le32(creq->state[i]);
                        } else {
                                __be32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_be32(creq->state[i]);
                        }
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
                              struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

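/*
 * Buffer the request data in creq->cache when, together with what is
 * already cached, it still fits below one full hash block; the data
 * will be processed once a complete block (or the final request)
 * arrives. Returns true if the request was fully absorbed by the cache.
 */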
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

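/*
 * Append an operation descriptor for one fragment to the TDMA chain,
 * followed by a dummy "launch" descriptor that triggers the engine.
 * If the template was still flagged as a first fragment, demote it to
 * a mid fragment for the operations that follow.
 */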
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

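/*
 * Terminate a DMA hash request. When the total length fits in the
 * engine's MAC length field and data is outstanding, let the hardware
 * finish the hash and DMA the result back; otherwise generate the
 * padding in a DMA-able buffer and feed it to the engine as additional
 * fragments.
 */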
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                ret = mv_cesa_dma_add_result_op(chain,
                                                CESA_SA_CFG_SRAM_OFFSET,
                                                CESA_SA_DATA_SRAM_OFFSET,
                                                CESA_TDMA_SRC_IN_SRAM, flags);
                if (ret)
                        return ERR_PTR(-ENOMEM);
                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

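/*
 * Build the TDMA descriptor chain for a hash request: map the source
 * scatterlist, inject the cached left-over bytes, then add one fragment
 * per SRAM block-worth of data, finishing with either the final-request
 * logic above or a last outstanding fragment.
 */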
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        bool set_state = false;
        int ret;
        u32 type;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
                set_state = true;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation.  Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        /*
         * If results are copied via DMA, this means that this
         * request can be directly processed by the engine,
         * without partial updates. So we can chain it at the
         * DMA level with other requests.
         */
        type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

        if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

        if (set_state) {
                /*
                 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
                 * let the step logic know that the IVDIG registers should be
                 * explicitly set before launching a TDMA chain.
                 */
                basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
        }

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

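/*
 * Common export helper: save the running digest state, the total byte
 * count and the partially filled block so the hash can be resumed later
 * by mv_cesa_ahash_import().
 */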
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

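/*
 * Common import helper: reinitialize the request, restore the digest
 * state and byte count, switch to mid-fragment mode if at least one
 * full block was already hashed, and refill the cache with the partial
 * block.
 */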
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};

struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

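/*
 * Hash one HMAC pad block synchronously and export the resulting
 * intermediate state, which later seeds the hardware IV registers.
 */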
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

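/*
 * Derive the HMAC inner and outer pads per RFC 2104: copy the key
 * (hashing it first if it exceeds one block), zero-extend it to the
 * block size, then XOR with HMAC_IPAD_VALUE (0x36) and HMAC_OPAD_VALUE
 * (0x5c) respectively.
 */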
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                memset(keydup, 0, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

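/*
 * Precompute the HMAC inner and outer intermediate states using the
 * named hash transform (the driver's own "mv-*" implementations),
 * storing them in @istate and @ostate so the hardware can resume from
 * them instead of rehashing the pads on every request.
 */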
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                 }
        }
};