linux/drivers/crypto/mediatek/mtk-sha.c
/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK           (sizeof(u32) - 1)
#define SHA_QUEUE_SIZE          512
#define SHA_BUF_SIZE            ((u32)PAGE_SIZE)

#define SHA_OP_UPDATE           1
#define SHA_OP_FINAL            2

#define SHA_DATA_LEN_MSK        cpu_to_le32(GENMASK(16, 0))
#define SHA_MAX_DIGEST_BUF_SIZE 32

/* SHA command token */
#define SHA_CT_SIZE             5
#define SHA_CT_CTRL_HDR         cpu_to_le32(0x02220000)
#define SHA_CMD0                cpu_to_le32(0x03020000)
#define SHA_CMD1                cpu_to_le32(0x21060000)
#define SHA_CMD2                cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH            cpu_to_le32(0x2 << 0)
#define SHA_TFM_SIZE(x)         cpu_to_le32((x) << 8)
#define SHA_TFM_START           cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE        cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE      cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1            cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256          cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224          cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512          cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384          cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)       cpu_to_le32(((x) & GENMASK(3, 0)) << 24)

/* SHA flags */
#define SHA_FLAGS_BUSY          BIT(0)
#define SHA_FLAGS_FINAL         BIT(1)
#define SHA_FLAGS_FINUP         BIT(2)
#define SHA_FLAGS_SG            BIT(3)
#define SHA_FLAGS_ALGO_MSK      GENMASK(8, 4)
#define SHA_FLAGS_SHA1          BIT(4)
#define SHA_FLAGS_SHA224        BIT(5)
#define SHA_FLAGS_SHA256        BIT(6)
#define SHA_FLAGS_SHA384        BIT(7)
#define SHA_FLAGS_SHA512        BIT(8)
#define SHA_FLAGS_HMAC          BIT(9)
#define SHA_FLAGS_PAD           BIT(10)

/**
 * struct mtk_sha_info - hardware information of SHA
 * @ctrl:       transform control words, including the start/continue bits
 * @cmd:        command token, hardware instruction
 * @tfm:        transform state of the hash algorithm
 * @digest:     buffer that holds keys, initial vectors and the digest
 */
struct mtk_sha_info {
        __le32 ctrl[2];
        __le32 cmd[3];
        __le32 tfm[2];
        __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};

struct mtk_sha_reqctx {
        struct mtk_sha_info info;
        unsigned long flags;
        unsigned long op;

        u64 digcnt;
        size_t bufcnt;
        dma_addr_t dma_addr;

        __le32 ct_hdr;
        u32 ct_size;
        dma_addr_t ct_dma;
        dma_addr_t tfm_dma;

        /* Walk state */
        struct scatterlist *sg;
        u32 offset;     /* Offset in current sg */
        u32 total;      /* Total request */
        size_t ds;
        size_t bs;

        u8 *buffer;
};

struct mtk_sha_hmac_ctx {
        struct crypto_shash     *shash;
        u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
        struct mtk_cryp *cryp;
        unsigned long flags;
        u8 id;
        u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

        struct mtk_sha_hmac_ctx base[0];
};

struct mtk_sha_drv {
        struct list_head dev_list;
        /* Device list lock */
        spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
        .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
        return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
                                 u32 offset, u32 value)
{
        writel_relaxed(value, cryp->base + offset);
}

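/*
 * Advance the command/result ring positions in lockstep and bump the
 * in-flight descriptor count; both rings wrap together once
 * MTK_DESC_NUM entries have been consumed.
 */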
static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
                                      struct mtk_desc **cmd_curr,
                                      struct mtk_desc **res_curr,
                                      int *count)
{
        *cmd_curr = ring->cmd_next++;
        *res_curr = ring->res_next++;
        (*count)++;

        if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
                ring->cmd_next = ring->cmd_base;
                ring->res_next = ring->res_base;
        }
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
        struct mtk_cryp *cryp = NULL;
        struct mtk_cryp *tmp;

        spin_lock_bh(&mtk_sha.lock);
        if (!tctx->cryp) {
                list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
                        cryp = tmp;
                        break;
                }
                tctx->cryp = cryp;
        } else {
                cryp = tctx->cryp;
        }

        /*
         * Assign a record id to the tfm in round-robin fashion; this
         * binds the tfm to its corresponding descriptor ring.
         */
        tctx->id = cryp->rec;
        cryp->rec = !cryp->rec;

        spin_unlock_bh(&mtk_sha.lock);

        return cryp;
}

static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
        size_t count;

        while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
                count = min(ctx->sg->length - ctx->offset, ctx->total);
                count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

                if (count <= 0) {
                        /*
                         * Check if count <= 0 because the buffer is full or
                         * because the sg length is 0. In the latter case,
                         * check if there is another sg in the list; a
                         * zero-length sg doesn't necessarily mean the end
                         * of the sg list.
                         */
                        if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
                                ctx->sg = sg_next(ctx->sg);
                                continue;
                        } else {
                                break;
                        }
                }

                scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
                                         ctx->offset, count, 0);

                ctx->bufcnt += count;
                ctx->offset += count;
                ctx->total -= count;

                if (ctx->offset == ctx->sg->length) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                        else
                                ctx->total = 0;
                }
        }

        return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
        u32 index, padlen;
        u64 bits[2];
        u64 size = ctx->digcnt;

        size += ctx->bufcnt;
        size += len;

        bits[1] = cpu_to_be64(size << 3);
        bits[0] = cpu_to_be64(size >> 61);

        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
        case SHA_FLAGS_SHA384:
        case SHA_FLAGS_SHA512:
                index = ctx->bufcnt & 0x7f;
                padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
                ctx->bufcnt += padlen + 16;
                ctx->flags |= SHA_FLAGS_PAD;
                break;

        default:
                index = ctx->bufcnt & 0x3f;
                padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
                ctx->bufcnt += padlen + 8;
                ctx->flags |= SHA_FLAGS_PAD;
                break;
        }
}
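
/*
 * Worked example of the padding rule above (illustration only): for a
 * 20-byte SHA-256 message, index = 20 and padlen = 56 - 20 = 36, so the
 * padded buffer is
 *
 *      20 bytes of message | 0x80 | 35 zero bytes | 8-byte length
 *
 * where the length field is the big-endian bit count (20 * 8 = 160),
 * giving exactly one 64-byte block. SHA384/SHA512 follow the same rule
 * with 128-byte blocks and a 128-bit length, which is why bits[] above
 * splits size into (size << 3) low and (size >> 61) high words.
 */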

/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
        struct mtk_sha_info *info = &ctx->info;

        ctx->ct_hdr = SHA_CT_CTRL_HDR;
        ctx->ct_size = SHA_CT_SIZE;

        info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
        case SHA_FLAGS_SHA1:
                info->tfm[0] |= SHA_TFM_SHA1;
                break;
        case SHA_FLAGS_SHA224:
                info->tfm[0] |= SHA_TFM_SHA224;
                break;
        case SHA_FLAGS_SHA256:
                info->tfm[0] |= SHA_TFM_SHA256;
                break;
        case SHA_FLAGS_SHA384:
                info->tfm[0] |= SHA_TFM_SHA384;
                break;
        case SHA_FLAGS_SHA512:
                info->tfm[0] |= SHA_TFM_SHA512;
                break;

        default:
                /* Should not happen... */
                return;
        }

        info->tfm[1] = SHA_TFM_HASH_STORE;
        info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
        info->ctrl[1] = info->tfm[1];

        info->cmd[0] = SHA_CMD0;
        info->cmd[1] = SHA_CMD1;
        info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}
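
/*
 * For instance, with SHA-256 the digest size ctx->ds is 32 bytes, so
 * SIZE_IN_WORDS(ctx->ds) is 8 (assuming SIZE_IN_WORDS() from
 * mtk-platform.h converts a byte count to 32-bit words); that word
 * count is what SHA_TFM_SIZE() and SHA_TFM_DIGEST() encode into the
 * transform record and command token above.
 */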

/*
 * Update the input data length field of the transform information and
 * map it to the DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha,
                               size_t len1, size_t len2)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        struct mtk_sha_info *info = &ctx->info;

        ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
        ctx->ct_hdr |= cpu_to_le32(len1 + len2);
        info->cmd[0] &= ~SHA_DATA_LEN_MSK;
        info->cmd[0] |= cpu_to_le32(len1 + len2);

        /* Set SHA_TFM_START only for the first iteration */
        if (ctx->digcnt)
                info->ctrl[0] &= ~SHA_TFM_START;

        ctx->digcnt += len1;

        ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
                                     DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
                dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
                return -EINVAL;
        }

        ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);

        return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests must be
 * pre-calculated: the engine processes the inner hash first, and the
 * resulting digest must then be hashed again to finish the HMAC. This
 * complex procedure limits HMAC performance, so we fall back to a
 * software hash for the final (outer) step.
 */
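/*
 * As a reference sketch, the overall computation is the standard HMAC
 * construction (RFC 2104):
 *
 *      HMAC(K, msg) = H((K ^ opad) || H((K ^ ipad) || msg))
 *
 * The engine produces the inner hash, since mtk_sha_init() prepends the
 * ipad block to the message; the shash fallback below then computes the
 * outer hash over opad and the inner digest.
 */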
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct mtk_sha_hmac_ctx *bctx = tctx->base;
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        SHASH_DESC_ON_STACK(shash, bctx->shash);

        shash->tfm = bctx->shash;
        shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

        return crypto_shash_init(shash) ?:
               crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
               crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}

/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags = 0;
        ctx->ds = crypto_ahash_digestsize(tfm);

        switch (ctx->ds) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA1;
                ctx->bs = SHA1_BLOCK_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA224;
                ctx->bs = SHA224_BLOCK_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA256;
                ctx->bs = SHA256_BLOCK_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA384;
                ctx->bs = SHA384_BLOCK_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA512;
                ctx->bs = SHA512_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->digcnt = 0;
        ctx->buffer = tctx->buf;

        if (tctx->flags & SHA_FLAGS_HMAC) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                memcpy(ctx->buffer, bctx->ipad, ctx->bs);
                ctx->bufcnt = ctx->bs;
                ctx->flags |= SHA_FLAGS_HMAC;
        }

        return 0;
}

static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
                        dma_addr_t addr1, size_t len1,
                        dma_addr_t addr2, size_t len2)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        struct mtk_ring *ring = cryp->ring[sha->id];
        struct mtk_desc *cmd, *res;
        int err, count = 0;

        err = mtk_sha_info_update(cryp, sha, len1, len2);
        if (err)
                return err;

        /* Fill in the command/result descriptors */
        mtk_sha_ring_shift(ring, &cmd, &res, &count);

        res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
        cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
                   MTK_DESC_CT_LEN(ctx->ct_size);
        cmd->buf = cpu_to_le32(addr1);
        cmd->ct = cpu_to_le32(ctx->ct_dma);
        cmd->ct_hdr = ctx->ct_hdr;
        cmd->tfm = cpu_to_le32(ctx->tfm_dma);

        if (len2) {
                mtk_sha_ring_shift(ring, &cmd, &res, &count);

                res->hdr = MTK_DESC_BUF_LEN(len2);
                cmd->hdr = MTK_DESC_BUF_LEN(len2);
                cmd->buf = cpu_to_le32(addr2);
        }

        cmd->hdr |= MTK_DESC_LAST;
        res->hdr |= MTK_DESC_LAST;

        /*
         * Make sure that all changes to the DMA ring are done before we
         * start the engine.
         */
        wmb();
        /* Start DMA transfer */
        mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
        mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));

        return -EINPROGRESS;
}

static int mtk_sha_dma_map(struct mtk_cryp *cryp,
                           struct mtk_sha_rec *sha,
                           struct mtk_sha_reqctx *ctx,
                           size_t count)
{
        ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
                                       SHA_BUF_SIZE, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
                dev_err(cryp->dev, "dma map error\n");
                return -EINVAL;
        }

        ctx->flags &= ~SHA_FLAGS_SG;

        return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}

static int mtk_sha_update_slow(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        size_t count;
        u32 final;

        mtk_sha_append_sg(ctx);

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

        if (final) {
                sha->flags |= SHA_FLAGS_FINAL;
                mtk_sha_fill_padding(ctx, 0);
        }

        if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
                count = ctx->bufcnt;
                ctx->bufcnt = 0;

                return mtk_sha_dma_map(cryp, sha, ctx, count);
        }
        return 0;
}

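/*
 * Fast path: DMA directly from suitably aligned scatterlist entries;
 * any unaligned or trailing data goes through the slow path above,
 * which copies it into the linear buffer first.
 */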
static int mtk_sha_update_start(struct mtk_cryp *cryp,
                                struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        u32 len, final, tail;
        struct scatterlist *sg;

        if (!ctx->total)
                return 0;

        if (ctx->bufcnt || ctx->offset)
                return mtk_sha_update_slow(cryp, sha);

        sg = ctx->sg;

        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                return mtk_sha_update_slow(cryp, sha);

        if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
                /* size is not ctx->bs aligned */
                return mtk_sha_update_slow(cryp, sha);

        len = min(ctx->total, sg->length);

        if (sg_is_last(sg)) {
                if (!(ctx->flags & SHA_FLAGS_FINUP)) {
                        /* not last sg must be ctx->bs aligned */
                        tail = len & (ctx->bs - 1);
                        len -= tail;
                }
        }

        ctx->total -= len;
        ctx->offset = len; /* offset where to start slow */

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        /* Add padding */
        if (final) {
                size_t count;

                tail = len & (ctx->bs - 1);
                len -= tail;
                ctx->total += tail;
                ctx->offset = len; /* offset where to start slow */

                sg = ctx->sg;
                mtk_sha_append_sg(ctx);
                mtk_sha_fill_padding(ctx, len);

                ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
                                               SHA_BUF_SIZE, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
                        dev_err(cryp->dev, "dma map error\n");
                        return -EINVAL;
                }

                sha->flags |= SHA_FLAGS_FINAL;
                count = ctx->bufcnt;
                ctx->bufcnt = 0;

                if (len == 0) {
                        ctx->flags &= ~SHA_FLAGS_SG;
                        return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
                                            count, 0, 0);

                } else {
                        ctx->sg = sg;
                        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                                dev_err(cryp->dev, "dma_map_sg error\n");
                                return -EINVAL;
                        }

                        ctx->flags |= SHA_FLAGS_SG;
                        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
                                            len, ctx->dma_addr, count);
                }
        }

        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                dev_err(cryp->dev, "dma_map_sg error\n");
                return -EINVAL;
        }

        ctx->flags |= SHA_FLAGS_SG;

        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
                            len, 0, 0);
}

static int mtk_sha_final_req(struct mtk_cryp *cryp,
                             struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        size_t count;

        mtk_sha_fill_padding(ctx, 0);

        sha->flags |= SHA_FLAGS_FINAL;
        count = ctx->bufcnt;
        ctx->bufcnt = 0;

        return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        __le32 *digest = ctx->info.digest;
        u32 *result = (u32 *)req->result;
        int i;

        /* Get the hash from the digest buffer */
        for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
                result[i] = le32_to_cpu(digest[i]);

        if (ctx->flags & SHA_FLAGS_HMAC)
                return mtk_sha_finish_hmac(req);

        return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha,
                               int err)
{
        if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
                err = mtk_sha_finish(sha->req);

        sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

        sha->req->base.complete(&sha->req->base, err);

        /* Handle new request */
        tasklet_schedule(&sha->queue_task);
}

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct ahash_request *req)
{
        struct mtk_sha_rec *sha = cryp->sha[id];
        struct crypto_async_request *async_req, *backlog;
        struct mtk_sha_reqctx *ctx;
        unsigned long flags;
        int err = 0, ret = 0;

        spin_lock_irqsave(&sha->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&sha->queue, req);

        if (SHA_FLAGS_BUSY & sha->flags) {
                spin_unlock_irqrestore(&sha->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&sha->queue);
        async_req = crypto_dequeue_request(&sha->queue);
        if (async_req)
                sha->flags |= SHA_FLAGS_BUSY;
        spin_unlock_irqrestore(&sha->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        ctx = ahash_request_ctx(req);

        sha->req = req;

        mtk_sha_info_init(ctx);

        if (ctx->op == SHA_OP_UPDATE) {
                err = mtk_sha_update_start(cryp, sha);
                if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
                        /* No final() after finup() */
                        err = mtk_sha_final_req(cryp, sha);
        } else if (ctx->op == SHA_OP_FINAL) {
                err = mtk_sha_final_req(cryp, sha);
        }

        if (unlikely(err != -EINPROGRESS))
                /* Task will not finish it, so do it here */
                mtk_sha_finish_req(cryp, sha, err);

        return ret;
}

static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

        ctx->op = op;

        return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
                         DMA_BIDIRECTIONAL);

        if (ctx->flags & SHA_FLAGS_SG) {
                dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
                if (ctx->sg->length == ctx->offset) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                }
                if (ctx->flags & SHA_FLAGS_PAD) {
                        dma_unmap_single(cryp->dev, ctx->dma_addr,
                                         SHA_BUF_SIZE, DMA_TO_DEVICE);
                }
        } else
                dma_unmap_single(cryp->dev, ctx->dma_addr,
                                 SHA_BUF_SIZE, DMA_TO_DEVICE);
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
                             struct mtk_sha_rec *sha)
{
        int err = 0;

        err = mtk_sha_update_start(cryp, sha);
        if (err != -EINPROGRESS)
                mtk_sha_finish_req(cryp, sha, err);
}

static int mtk_sha_update(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->offset = 0;

        if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
            !(ctx->flags & SHA_FLAGS_FINUP))
                return mtk_sha_append_sg(ctx);

        return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags |= SHA_FLAGS_FINUP;

        if (ctx->flags & SHA_FLAGS_PAD)
                return mtk_sha_finish(req);

        return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        int err1, err2;

        ctx->flags |= SHA_FLAGS_FINUP;

        err1 = mtk_sha_update(req);
        if (err1 == -EINPROGRESS || err1 == -EBUSY)
                return err1;
        /*
         * final() must always be called to clean up resources,
         * even if update() failed.
         */
        err2 = mtk_sha_final(req);

        return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
        return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

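/*
 * Pre-compute the HMAC ipad/opad blocks per RFC 2104: hash down an
 * over-long key, zero-pad it to the block size, then XOR with the
 * 0x36/0x5c constants.
 */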
static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          u32 keylen)
{
        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct mtk_sha_hmac_ctx *bctx = tctx->base;
        size_t bs = crypto_shash_blocksize(bctx->shash);
        size_t ds = crypto_shash_digestsize(bctx->shash);
        int err, i;

        SHASH_DESC_ON_STACK(shash, bctx->shash);

        shash->tfm = bctx->shash;
        shash->flags = crypto_shash_get_flags(bctx->shash) &
                       CRYPTO_TFM_REQ_MAY_SLEEP;

        if (keylen > bs) {
                err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
                if (err)
                        return err;
                keylen = ds;
        } else {
                memcpy(bctx->ipad, key, keylen);
        }

        memset(bctx->ipad + keylen, 0, bs - keylen);
        memcpy(bctx->opad, bctx->ipad, bs);

        for (i = 0; i < bs; i++) {
                bctx->ipad[i] ^= 0x36;
                bctx->opad[i] ^= 0x5c;
        }

        return 0;
}

static int mtk_sha_export(struct ahash_request *req, void *out)
{
        const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(out, ctx, sizeof(*ctx));
        return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(ctx, in, sizeof(*ctx));
        return 0;
}

static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
                                const char *alg_base)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
        struct mtk_cryp *cryp = NULL;

        cryp = mtk_sha_find_dev(tctx);
        if (!cryp)
                return -ENODEV;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mtk_sha_reqctx));

        if (alg_base) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                tctx->flags |= SHA_FLAGS_HMAC;
                bctx->shash = crypto_alloc_shash(alg_base, 0,
                                        CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(bctx->shash)) {
                        pr_err("base driver %s could not be loaded.\n",
                               alg_base);

                        return PTR_ERR(bctx->shash);
                }
        }
        return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

        if (tctx->flags & SHA_FLAGS_HMAC) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                crypto_free_shash(bctx->shash);
        }
}

static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "mtk-sha1",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA224_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha224",
                .cra_driver_name        = "mtk-sha224",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "mtk-sha256",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha1)",
                .cra_driver_name        = "mtk-hmac-sha1",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha1_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA224_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha224)",
                .cra_driver_name        = "mtk-hmac-sha224",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha224_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha256)",
                .cra_driver_name        = "mtk-hmac-sha256",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha256_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA384_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha384",
                .cra_driver_name        = "mtk-sha384",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA512_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha512",
                .cra_driver_name        = "mtk-sha512",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA384_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha384)",
                .cra_driver_name        = "mtk-hmac-sha384",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha384_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA512_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha512)",
                .cra_driver_name        = "mtk-hmac-sha512",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha512_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
};
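
/*
 * Minimal usage sketch (illustration only; standard kernel crypto API,
 * not part of this driver, and assuming a kernel that provides
 * DECLARE_CRYPTO_WAIT()/crypto_wait_req()): computing a digest through
 * the "sha256" ahash registered above. Error handling is omitted.
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      struct scatterlist sg;
 *      DECLARE_CRYPTO_WAIT(wait);
 *
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 crypto_req_done, &wait);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */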

static void mtk_sha_queue_task(unsigned long data)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;

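        /* sha->id is MTK_RING2 or MTK_RING3; subtract to get the record index (0/1) */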
        mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}

static void mtk_sha_done_task(unsigned long data)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
        struct mtk_cryp *cryp = sha->cryp;

        mtk_sha_unmap(cryp, sha);
        mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
        struct mtk_cryp *cryp = sha->cryp;
        u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

        mtk_sha_write(cryp, RDR_STAT(sha->id), val);

        if (likely((SHA_FLAGS_BUSY & sha->flags))) {
                mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
                mtk_sha_write(cryp, RDR_THRESH(sha->id),
                              MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

                tasklet_schedule(&sha->done_task);
        } else {
                dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
        }
        return IRQ_HANDLED;
}

/*
 * Two SHA records are used to get extra performance. This is similar
 * to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
        struct mtk_sha_rec **sha = cryp->sha;
        int i, err = -ENOMEM;

        for (i = 0; i < MTK_REC_NUM; i++) {
                sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
                if (!sha[i])
                        goto err_cleanup;

                sha[i]->cryp = cryp;

                spin_lock_init(&sha[i]->lock);
                crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);

                tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
                             (unsigned long)sha[i]);
                tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
                             (unsigned long)sha[i]);
        }

        /* Link to ring2 and ring3 respectively */
        sha[0]->id = MTK_RING2;
        sha[1]->id = MTK_RING3;

        cryp->rec = 1;

        return 0;

err_cleanup:
        for (; i--; )
                kfree(sha[i]);
        return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
        int i;

        for (i = 0; i < MTK_REC_NUM; i++) {
                tasklet_kill(&cryp->sha[i]->done_task);
                tasklet_kill(&cryp->sha[i]->queue_task);

                kfree(cryp->sha[i]);
        }
}

static void mtk_sha_unregister_algs(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
                crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
                err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
                if (err)
                        goto err_sha_224_256_algs;
        }

        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
                err = crypto_register_ahash(&algs_sha384_sha512[i]);
                if (err)
                        goto err_sha_384_512_algs;
        }

        return 0;

err_sha_384_512_algs:
        for (; i--; )
                crypto_unregister_ahash(&algs_sha384_sha512[i]);
        i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
        for (; i--; )
                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

        return err;
}

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
        int err;

        INIT_LIST_HEAD(&cryp->sha_list);

        /* Initialize two hash records */
        err = mtk_sha_record_init(cryp);
        if (err)
                goto err_record;

        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
                               0, "mtk-sha", cryp->sha[0]);
        if (err) {
                dev_err(cryp->dev, "unable to request sha irq0.\n");
                goto err_res;
        }

        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
                               0, "mtk-sha", cryp->sha[1]);
        if (err) {
                dev_err(cryp->dev, "unable to request sha irq1.\n");
                goto err_res;
        }

        /* Enable ring2 and ring3 interrupt for hash */
        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);

        spin_lock(&mtk_sha.lock);
        list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
        spin_unlock(&mtk_sha.lock);

        err = mtk_sha_register_algs();
        if (err)
                goto err_algs;

        return 0;

err_algs:
        spin_lock(&mtk_sha.lock);
        list_del(&cryp->sha_list);
        spin_unlock(&mtk_sha.lock);
err_res:
        mtk_sha_record_free(cryp);
err_record:
        dev_err(cryp->dev, "mtk-sha initialization failed.\n");
        return err;
}

void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
        spin_lock(&mtk_sha.lock);
        list_del(&cryp->sha_list);
        spin_unlock(&mtk_sha.lock);

        mtk_sha_unregister_algs();
        mtk_sha_record_free(cryp);
}
1359