linux/drivers/crypto/marvell/cesa/hash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

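/*
 * Iterator state for DMA-based hash requests. The two iterators paired here
 * come from the shared CESA TDMA helpers (see cesa.h): the base iterator
 * splits the request into SRAM-payload-sized operation blocks, while the src
 * iterator tracks progress through the source scatterlist, so that each
 * operation block is fed from the correct scatterlist offset.
 */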
struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

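/*
 * MD5, SHA1 and SHA256 all use the same Merkle-Damgard padding: a 0x80
 * byte, then zeroes until the length is 56 mod 64, then the message length
 * in bits as a 64-bit integer (little endian for MD5, big endian for SHA),
 * so the padded message is a whole number of 64-byte blocks.
 * For example, a 20-byte message gives index = 20, padlen = 36, and a
 * 44-byte trailer (36 bytes of padding plus the 8-byte length), for one
 * 64-byte block in total.
 */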
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

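/*
 * One step of a standard (non-TDMA) hash request: the CPU copies the
 * operation descriptor and up to CESA_SA_SRAM_PAYLOAD_SIZE bytes of input
 * into the engine SRAM, seeds the IVDIG registers on the first step
 * (!sreq->offset), and then kicks the accelerator. Later steps resume from
 * sreq->offset until the whole request has been fed through the SRAM
 * window.
 */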
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        if (engine->pool)
                memcpy(engine->sram_pool, &creq->op_tmpl,
                       sizeof(creq->op_tmpl));
        else
                memcpy_toio(engine->sram, &creq->op_tmpl,
                            sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i],
                                       engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr) {
                if (engine->pool)
                        memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
                               creq->cache, creq->cache_ptr);
                else
                        memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                    creq->cache, creq->cache_ptr);
        }

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += mv_cesa_sg_copy_to_sram(
                        engine, req->src, creq->src_nents,
                        CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
                        len - creq->cache_ptr, sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                if (engine->pool)
                                        memcpy(creq->cache,
                                               engine->sram_pool +
                                               CESA_SA_DATA_SRAM_OFFSET + len,
                                               new_cache_ptr);
                                else
                                        memcpy_fromio(creq->cache,
                                                      engine->sram +
                                                      CESA_SA_DATA_SRAM_OFFSET +
                                                      len,
                                                      new_cache_ptr);
                        } else {
                                i = mv_cesa_ahash_pad_req(creq, creq->cache);
                                len += i;
                                if (engine->pool)
                                        memcpy(engine->sram_pool + len +
                                               CESA_SA_DATA_SRAM_OFFSET,
                                               creq->cache, i);
                                else
                                        memcpy_toio(engine->sram + len +
                                                    CESA_SA_DATA_SRAM_OFFSET,
                                                    creq->cache, i);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        if (engine->pool)
                memcpy(engine->sram_pool, op, sizeof(*op));
        else
                memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

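/*
 * One step of a TDMA-based hash request. Unlike the standard path, the
 * descriptor chain moves the data and operation blocks into SRAM by itself;
 * the CPU only has to restore the partial digest into the IVDIG registers
 * when the chain was flagged with CESA_TDMA_SET_STATE, i.e. when this
 * request resumes a previously started hash.
 */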
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *base = &creq->base;

        /* We must explicitly set the digest state. */
        if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
                struct mv_cesa_engine *engine = base->engine;
                int i;

                /* Set the hash state in the IVDIG regs. */
                for (i = 0; i < ARRAY_SIZE(creq->state); i++)
                        writel_relaxed(creq->state[i], engine->regs +
                                       CESA_IVDIG(i));
        }

        mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
            (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
             CESA_TDMA_RESULT) {
                __le32 *data = NULL;

                /*
                 * Result is already in the correct endianness when the SA is
                 * used.
                 */
                data = creq->base.chain.last->op->ctx.hash.hash;
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = le32_to_cpu(data[i]);

                memcpy(ahashreq->result, data, digsize);
        } else {
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = readl_relaxed(engine->regs +
                                                       CESA_IVDIG(i));
                if (creq->last_req) {
                        /*
                         * The hardware's MD5 digest is little endian, but
                         * its SHA digests are big endian.
                         */
                        if (creq->algo_le) {
                                __le32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_le32(creq->state[i]);
                        } else {
                                __be32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_be32(creq->state[i]);
                        }
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

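/*
 * Common request initialization: reset the per-request context and turn the
 * caller's template into a MAC-only, first-fragment operation with zeroed
 * lengths. algo_le records whether the final digest (and the length field
 * in the padding) is little endian (MD5) or big endian (SHA).
 */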
static void mv_cesa_ahash_init(struct ahash_request *req,
                              struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

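/*
 * Small updates are not sent to the engine at all: as long as the cached
 * data plus the new data stays below one block and this is not the final
 * request, the data is simply copied into creq->cache and hashed later,
 * together with a subsequent update or the final padding.
 */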
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

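/*
 * Append one operation block to the TDMA chain: copy the template into a
 * new op descriptor, set its fragment length, and follow it with a dummy
 * "launch" descriptor that triggers the engine. Once the first fragment
 * has been emitted, the template is downgraded to a mid fragment for all
 * subsequent ones.
 */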
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

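/*
 * If a previous update left data in creq->cache, copy it into a DMA-able
 * buffer and queue a transfer that places it at the start of the SRAM data
 * area, ahead of the new scatterlist data.
 */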
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                                CESA_SA_DESC_CFG_NOT_FRAG :
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                ret = mv_cesa_dma_add_result_op(chain,
                                                CESA_SA_CFG_SRAM_OFFSET,
                                                CESA_SA_DATA_SRAM_OFFSET,
                                                CESA_TDMA_SRC_IN_SRAM, flags);
                if (ret)
                        return ERR_PTR(-ENOMEM);
                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

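/*
 * Build the whole TDMA chain for this request. Roughly (assuming the usual
 * case with cached data, new data, and a final operation):
 *
 *   [cache -> SRAM] -> [data -> SRAM]... -> [op + launch] -> ...
 *                                        -> [last op] -> [dummy end]
 *
 * with an op/launch pair inserted between each SRAM block-worth of data,
 * and the final operation chosen by mv_cesa_ahash_dma_last_req() when this
 * is the last request.
 */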
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        bool set_state = false;
        int ret;
        u32 type;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
                set_state = true;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain,
                                                  &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }
        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        /*
         * If results are copied via DMA, this means that this
         * request can be directly processed by the engine,
         * without partial updates. So we can chain it at the
         * DMA level with other requests.
         */
        type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

        if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

        if (set_state) {
                /*
                 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
                 * let the step logic know that the IVDIG registers should be
                 * explicitly set before launching a TDMA chain.
                 */
                basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
        }

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

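/*
 * crypto_ahash entry points: update only accumulates the total length and
 * queues the data; final marks the request as last with no new data, so
 * only the padding is processed; finup does both in one pass.
 */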
static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

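/*
 * Export/import exchange the partial state as (digest words, total byte
 * count, one block of cached data), matching the layout of the generic
 * md5/sha1/sha256 state structures used by the wrappers below.
 */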
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

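/*
 * The HMAC key processing below runs synchronously at setkey time:
 * requests are submitted to a (possibly asynchronous) ahash and the caller
 * waits on this completion for the result.
 */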
struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

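/*
 * Standard HMAC (RFC 2104) key preprocessing: keys longer than a block are
 * first replaced by their digest, the result is zero-padded to a full
 * block, and the ipad/opad blocks are derived by XORing every byte with
 * HMAC_IPAD_VALUE (0x36) and HMAC_OPAD_VALUE (0x5c) respectively.
 */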
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                kfree_sensitive(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

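/*
 * Precompute the HMAC inner and outer states: hash one block of ipad and
 * one block of opad through the named driver algorithm and export the
 * resulting partial states. At runtime these are loaded through the op
 * template's IV, so an HMAC is essentially a hash that starts from a
 * precomputed digest state.
 */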
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kcalloc(2, blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = cpu_to_be32(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

        return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = cpu_to_be32(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = cpu_to_be32(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};