linux/drivers/crypto/mediatek/mtk-sha.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK           (sizeof(u32) - 1)
#define SHA_QUEUE_SIZE          512
#define SHA_BUF_SIZE            ((u32)PAGE_SIZE)

#define SHA_OP_UPDATE           1
#define SHA_OP_FINAL            2

#define SHA_DATA_LEN_MSK        cpu_to_le32(GENMASK(16, 0))
#define SHA_MAX_DIGEST_BUF_SIZE 32

/* SHA command token */
#define SHA_CT_SIZE             5
#define SHA_CT_CTRL_HDR         cpu_to_le32(0x02220000)
#define SHA_CMD0                cpu_to_le32(0x03020000)
#define SHA_CMD1                cpu_to_le32(0x21060000)
#define SHA_CMD2                cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH            cpu_to_le32(0x2 << 0)
#define SHA_TFM_SIZE(x)         cpu_to_le32((x) << 8)
#define SHA_TFM_START           cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE        cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE      cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1            cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256          cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224          cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512          cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384          cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)       cpu_to_le32(((x) & GENMASK(3, 0)) << 24)

/* SHA flags */
#define SHA_FLAGS_BUSY          BIT(0)
#define SHA_FLAGS_FINAL         BIT(1)
#define SHA_FLAGS_FINUP         BIT(2)
#define SHA_FLAGS_SG            BIT(3)
#define SHA_FLAGS_ALGO_MSK      GENMASK(8, 4)
#define SHA_FLAGS_SHA1          BIT(4)
#define SHA_FLAGS_SHA224        BIT(5)
#define SHA_FLAGS_SHA256        BIT(6)
#define SHA_FLAGS_SHA384        BIT(7)
#define SHA_FLAGS_SHA512        BIT(8)
#define SHA_FLAGS_HMAC          BIT(9)
#define SHA_FLAGS_PAD           BIT(10)
/**
 * struct mtk_sha_info - hardware information of SHA
 * @ctrl:       control token, transform control bits
 * @cmd:        command token, hardware instruction
 * @tfm:        transform state of the hash algorithm
 * @digest:     holds initial vectors and the resulting message digest
 */
struct mtk_sha_info {
        __le32 ctrl[2];
        __le32 cmd[3];
        __le32 tfm[2];
        __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};

struct mtk_sha_reqctx {
        struct mtk_sha_info info;
        unsigned long flags;
        unsigned long op;

        u64 digcnt;
        size_t bufcnt;
        dma_addr_t dma_addr;

        __le32 ct_hdr;
        u32 ct_size;
        dma_addr_t ct_dma;
        dma_addr_t tfm_dma;

        /* Walk state */
        struct scatterlist *sg;
        u32 offset;     /* Offset in current sg */
        u32 total;      /* Total request */
        size_t ds;
        size_t bs;

        u8 *buffer;
};

struct mtk_sha_hmac_ctx {
        struct crypto_shash     *shash;
        u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
        struct mtk_cryp *cryp;
        unsigned long flags;
        u8 id;
        u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

        struct mtk_sha_hmac_ctx base[];
};

struct mtk_sha_drv {
        struct list_head dev_list;
        /* Device list lock */
        spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
        .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
        return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
                                 u32 offset, u32 value)
{
        writel_relaxed(value, cryp->base + offset);
}

static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
                                      struct mtk_desc **cmd_curr,
                                      struct mtk_desc **res_curr,
                                      int *count)
{
        *cmd_curr = ring->cmd_next++;
        *res_curr = ring->res_next++;
        (*count)++;

        if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
                ring->cmd_next = ring->cmd_base;
                ring->res_next = ring->res_base;
        }
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
        struct mtk_cryp *cryp = NULL;
        struct mtk_cryp *tmp;

        spin_lock_bh(&mtk_sha.lock);
        if (!tctx->cryp) {
                list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
                        cryp = tmp;
                        break;
                }
                tctx->cryp = cryp;
        } else {
                cryp = tctx->cryp;
        }

        /*
         * Assign the record id to the tfm in round-robin fashion, and
         * this will help the tfm to bind to the corresponding
         * descriptor rings.
         */
        tctx->id = cryp->rec;
        cryp->rec = !cryp->rec;

        spin_unlock_bh(&mtk_sha.lock);

        return cryp;
}

static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
        size_t count;

        while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
                count = min(ctx->sg->length - ctx->offset, ctx->total);
                count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

                if (count <= 0) {
                        /*
                         * Check if count <= 0 because the buffer is full or
                         * because the sg length is 0. In the latter case,
                         * check if there is another sg in the list; a zero
                         * length sg doesn't necessarily mean the end of the
                         * sg list.
                         */
                        if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
                                ctx->sg = sg_next(ctx->sg);
                                continue;
                        } else {
                                break;
                        }
                }

                scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
                                         ctx->offset, count, 0);

                ctx->bufcnt += count;
                ctx->offset += count;
                ctx->total -= count;

                if (ctx->offset == ctx->sg->length) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                        else
                                ctx->total = 0;
                }
        }

        return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or a
 * 128 bit block (SHA384/SHA512) containing the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
        u32 index, padlen;
        __be64 bits[2];
        u64 size = ctx->digcnt;

        size += ctx->bufcnt;
        size += len;

        bits[1] = cpu_to_be64(size << 3);
        bits[0] = cpu_to_be64(size >> 61);

        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
        case SHA_FLAGS_SHA384:
        case SHA_FLAGS_SHA512:
                index = ctx->bufcnt & 0x7f;
                padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
                ctx->bufcnt += padlen + 16;
                ctx->flags |= SHA_FLAGS_PAD;
                break;

        default:
                index = ctx->bufcnt & 0x3f;
                padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
                *(ctx->buffer + ctx->bufcnt) = 0x80;
                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
                memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
                ctx->bufcnt += padlen + 8;
                ctx->flags |= SHA_FLAGS_PAD;
                break;
        }
}
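
/*
 * Illustrative sketch (mtk_sha_example_padlen() is hypothetical and not
 * used by the driver): the padlen arithmetic above as a standalone
 * helper. For example, a 3-byte SHA-256 message gives index = 3 and
 * padlen = 56 - 3 = 53, so the padded message is 3 + 53 + 8 = 64 bytes;
 * a 60-byte message gives padlen = (64 + 56) - 60 = 60, so the padded
 * message is 60 + 60 + 8 = 128 bytes (two blocks).
 */
static inline u32 mtk_sha_example_padlen(size_t msglen, bool is_sha512_family)
{
        u32 blk = is_sha512_family ? 128 : 64;  /* block size in bytes */
        u32 off = is_sha512_family ? 112 : 56;  /* where the length field starts */
        u32 index = msglen & (blk - 1);         /* bytes in the last block */

        return (index < off) ? (off - index) : ((blk + off) - index);
}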

/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
        struct mtk_sha_info *info = &ctx->info;

        ctx->ct_hdr = SHA_CT_CTRL_HDR;
        ctx->ct_size = SHA_CT_SIZE;

        info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
        case SHA_FLAGS_SHA1:
                info->tfm[0] |= SHA_TFM_SHA1;
                break;
        case SHA_FLAGS_SHA224:
                info->tfm[0] |= SHA_TFM_SHA224;
                break;
        case SHA_FLAGS_SHA256:
                info->tfm[0] |= SHA_TFM_SHA256;
                break;
        case SHA_FLAGS_SHA384:
                info->tfm[0] |= SHA_TFM_SHA384;
                break;
        case SHA_FLAGS_SHA512:
                info->tfm[0] |= SHA_TFM_SHA512;
                break;
        default:
                /* Should not happen... */
                return;
        }

        info->tfm[1] = SHA_TFM_HASH_STORE;
        info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
        info->ctrl[1] = info->tfm[1];

        info->cmd[0] = SHA_CMD0;
        info->cmd[1] = SHA_CMD1;
        info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}
/*
 * Update the input data length field of the transform information and
 * map it to the DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha,
                               size_t len1, size_t len2)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        struct mtk_sha_info *info = &ctx->info;

        ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
        ctx->ct_hdr |= cpu_to_le32(len1 + len2);
        info->cmd[0] &= ~SHA_DATA_LEN_MSK;
        info->cmd[0] |= cpu_to_le32(len1 + len2);

        /* Set SHA_TFM_START only for the first iteration */
        if (ctx->digcnt)
                info->ctrl[0] &= ~SHA_TFM_START;

        ctx->digcnt += len1;

        ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
                                     DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
                dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
                return -EINVAL;
        }

        ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);

        return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests would
 * both need to be pre-calculated and fed back through the engine before
 * they could be applied to the input message. This complex hashing
 * procedure limits HMAC performance, so we fall back to software
 * hashing for the outer digest instead.
 */
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct mtk_sha_hmac_ctx *bctx = tctx->base;
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        SHASH_DESC_ON_STACK(shash, bctx->shash);

        shash->tfm = bctx->shash;

        return crypto_shash_init(shash) ?:
               crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
               crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}
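
/*
 * For reference, the identity computed across hardware and software is
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * mtk_sha_init() prepends the ipad block to the message, so the engine
 * produces the inner hash H((K ^ ipad) || m) into req->result, and the
 * shash above finishes the outer hash over opad || inner in software.
 */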

/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags = 0;
        ctx->ds = crypto_ahash_digestsize(tfm);

        switch (ctx->ds) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA1;
                ctx->bs = SHA1_BLOCK_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA224;
                ctx->bs = SHA224_BLOCK_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA256;
                ctx->bs = SHA256_BLOCK_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA384;
                ctx->bs = SHA384_BLOCK_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                ctx->flags |= SHA_FLAGS_SHA512;
                ctx->bs = SHA512_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->digcnt = 0;
        ctx->buffer = tctx->buf;

        if (tctx->flags & SHA_FLAGS_HMAC) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                memcpy(ctx->buffer, bctx->ipad, ctx->bs);
                ctx->bufcnt = ctx->bs;
                ctx->flags |= SHA_FLAGS_HMAC;
        }

        return 0;
}

static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
                        dma_addr_t addr1, size_t len1,
                        dma_addr_t addr2, size_t len2)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        struct mtk_ring *ring = cryp->ring[sha->id];
        struct mtk_desc *cmd, *res;
        int err, count = 0;

        err = mtk_sha_info_update(cryp, sha, len1, len2);
        if (err)
                return err;

        /* Fill in the command/result descriptors */
        mtk_sha_ring_shift(ring, &cmd, &res, &count);

        res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
        cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
                   MTK_DESC_CT_LEN(ctx->ct_size);
        cmd->buf = cpu_to_le32(addr1);
        cmd->ct = cpu_to_le32(ctx->ct_dma);
        cmd->ct_hdr = ctx->ct_hdr;
        cmd->tfm = cpu_to_le32(ctx->tfm_dma);

        if (len2) {
                mtk_sha_ring_shift(ring, &cmd, &res, &count);

                res->hdr = MTK_DESC_BUF_LEN(len2);
                cmd->hdr = MTK_DESC_BUF_LEN(len2);
                cmd->buf = cpu_to_le32(addr2);
        }

        cmd->hdr |= MTK_DESC_LAST;
        res->hdr |= MTK_DESC_LAST;

        /*
         * Make sure that all changes to the DMA ring are done before we
         * start the engine.
         */
        wmb();
        /* Start DMA transfer */
        mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
        mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));

        return -EINPROGRESS;
}

static int mtk_sha_dma_map(struct mtk_cryp *cryp,
                           struct mtk_sha_rec *sha,
                           struct mtk_sha_reqctx *ctx,
                           size_t count)
{
        ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
                                       SHA_BUF_SIZE, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
                dev_err(cryp->dev, "dma map error\n");
                return -EINVAL;
        }

        ctx->flags &= ~SHA_FLAGS_SG;

        return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}

static int mtk_sha_update_slow(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        size_t count;
        u32 final;

        mtk_sha_append_sg(ctx);

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

        if (final) {
                sha->flags |= SHA_FLAGS_FINAL;
                mtk_sha_fill_padding(ctx, 0);
        }

        if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
                count = ctx->bufcnt;
                ctx->bufcnt = 0;

                return mtk_sha_dma_map(cryp, sha, ctx, count);
        }
        return 0;
}

static int mtk_sha_update_start(struct mtk_cryp *cryp,
                                struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        u32 len, final, tail;
        struct scatterlist *sg;

        if (!ctx->total)
                return 0;

        if (ctx->bufcnt || ctx->offset)
                return mtk_sha_update_slow(cryp, sha);

        sg = ctx->sg;

        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                return mtk_sha_update_slow(cryp, sha);

        if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
                /* size is not ctx->bs aligned */
                return mtk_sha_update_slow(cryp, sha);

        len = min(ctx->total, sg->length);

        if (sg_is_last(sg)) {
                if (!(ctx->flags & SHA_FLAGS_FINUP)) {
                        /* more data to come: len must be ctx->bs aligned */
                        tail = len & (ctx->bs - 1);
                        len -= tail;
                }
        }

        ctx->total -= len;
        ctx->offset = len; /* offset where the slow path starts */

        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

        /* Add padding */
        if (final) {
                size_t count;

                tail = len & (ctx->bs - 1);
                len -= tail;
                ctx->total += tail;
                ctx->offset = len; /* offset where the slow path starts */

                sg = ctx->sg;
                mtk_sha_append_sg(ctx);
                mtk_sha_fill_padding(ctx, len);

                ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
                                               SHA_BUF_SIZE, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
                        dev_err(cryp->dev, "dma map bytes error\n");
                        return -EINVAL;
                }

                sha->flags |= SHA_FLAGS_FINAL;
                count = ctx->bufcnt;
                ctx->bufcnt = 0;

                if (len == 0) {
                        ctx->flags &= ~SHA_FLAGS_SG;
                        return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
                                            count, 0, 0);
                } else {
                        ctx->sg = sg;
                        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                                dev_err(cryp->dev, "dma_map_sg error\n");
                                return -EINVAL;
                        }

                        ctx->flags |= SHA_FLAGS_SG;
                        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
                                            len, ctx->dma_addr, count);
                }
        }

        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
                dev_err(cryp->dev, "dma_map_sg error\n");
                return -EINVAL;
        }

        ctx->flags |= SHA_FLAGS_SG;

        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
                            len, 0, 0);
}
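
/*
 * Illustrative summary (mtk_sha_example_can_dma() is hypothetical and
 * not used by the driver): the fast DMA path above is taken only when
 * there is no buffered or partially walked data, the sg offset is
 * 32-bit aligned, and every non-final sg entry is a multiple of the
 * block size; otherwise the data is bounced through ctx->buffer.
 */
static inline bool mtk_sha_example_can_dma(struct mtk_sha_reqctx *ctx)
{
        return !ctx->bufcnt && !ctx->offset &&
               IS_ALIGNED(ctx->sg->offset, sizeof(u32)) &&
               (sg_is_last(ctx->sg) ||
                IS_ALIGNED(ctx->sg->length, ctx->bs));
}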

static int mtk_sha_final_req(struct mtk_cryp *cryp,
                             struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
        size_t count;

        mtk_sha_fill_padding(ctx, 0);

        sha->flags |= SHA_FLAGS_FINAL;
        count = ctx->bufcnt;
        ctx->bufcnt = 0;

        return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize HMAC) */
static int mtk_sha_finish(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        __le32 *digest = ctx->info.digest;
        u32 *result = (u32 *)req->result;
        int i;

        /* Get the hash from the digest buffer */
        for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
                result[i] = le32_to_cpu(digest[i]);

        if (ctx->flags & SHA_FLAGS_HMAC)
                return mtk_sha_finish_hmac(req);

        return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha,
                               int err)
{
        if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
                err = mtk_sha_finish(sha->req);

        sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

        sha->req->base.complete(&sha->req->base, err);

        /* Handle new request */
        tasklet_schedule(&sha->queue_task);
}

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct ahash_request *req)
{
        struct mtk_sha_rec *sha = cryp->sha[id];
        struct crypto_async_request *async_req, *backlog;
        struct mtk_sha_reqctx *ctx;
        unsigned long flags;
        int err = 0, ret = 0;

        spin_lock_irqsave(&sha->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&sha->queue, req);

        if (SHA_FLAGS_BUSY & sha->flags) {
                spin_unlock_irqrestore(&sha->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&sha->queue);
        async_req = crypto_dequeue_request(&sha->queue);
        if (async_req)
                sha->flags |= SHA_FLAGS_BUSY;
        spin_unlock_irqrestore(&sha->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        ctx = ahash_request_ctx(req);

        sha->req = req;

        mtk_sha_info_init(ctx);

        if (ctx->op == SHA_OP_UPDATE) {
                err = mtk_sha_update_start(cryp, sha);
                if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
                        /* No final() after finup() */
                        err = mtk_sha_final_req(cryp, sha);
        } else if (ctx->op == SHA_OP_FINAL) {
                err = mtk_sha_final_req(cryp, sha);
        }

        if (unlikely(err != -EINPROGRESS))
                /* Task will not finish it, so do it here */
                mtk_sha_finish_req(cryp, sha, err);

        return ret;
}

static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

        ctx->op = op;

        return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
                         DMA_BIDIRECTIONAL);

        if (ctx->flags & SHA_FLAGS_SG) {
                dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
                if (ctx->sg->length == ctx->offset) {
                        ctx->sg = sg_next(ctx->sg);
                        if (ctx->sg)
                                ctx->offset = 0;
                }
                if (ctx->flags & SHA_FLAGS_PAD) {
                        dma_unmap_single(cryp->dev, ctx->dma_addr,
                                         SHA_BUF_SIZE, DMA_TO_DEVICE);
                }
        } else
                dma_unmap_single(cryp->dev, ctx->dma_addr,
                                 SHA_BUF_SIZE, DMA_TO_DEVICE);
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
                             struct mtk_sha_rec *sha)
{
        int err = 0;

        err = mtk_sha_update_start(cryp, sha);
        if (err != -EINPROGRESS)
                mtk_sha_finish_req(cryp, sha, err);
}

static int mtk_sha_update(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->offset = 0;

        if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
            !(ctx->flags & SHA_FLAGS_FINUP))
                return mtk_sha_append_sg(ctx);

        return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        ctx->flags |= SHA_FLAGS_FINUP;

        if (ctx->flags & SHA_FLAGS_PAD)
                return mtk_sha_finish(req);

        return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
        int err1, err2;

        ctx->flags |= SHA_FLAGS_FINUP;

        err1 = mtk_sha_update(req);
        if (err1 == -EINPROGRESS ||
            (err1 == -EBUSY && (ahash_request_flags(req) &
                                CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err1;
        /*
         * final() always has to be called to clean up resources,
         * even if update() failed.
         */
        err2 = mtk_sha_final(req);

        return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
        return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          u32 keylen)
{
        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct mtk_sha_hmac_ctx *bctx = tctx->base;
        size_t bs = crypto_shash_blocksize(bctx->shash);
        size_t ds = crypto_shash_digestsize(bctx->shash);
        int err, i;

        if (keylen > bs) {
                err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
                                              bctx->ipad);
                if (err)
                        return err;
                keylen = ds;
        } else {
                memcpy(bctx->ipad, key, keylen);
        }

        memset(bctx->ipad + keylen, 0, bs - keylen);
        memcpy(bctx->opad, bctx->ipad, bs);

        for (i = 0; i < bs; i++) {
                bctx->ipad[i] ^= HMAC_IPAD_VALUE;
                bctx->opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}
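
/*
 * Usage sketch (mtk_sha_example_hmac_key() is hypothetical, not part of
 * the driver, and assumes <crypto/hash.h> is reachable through
 * mtk-platform.h): keys arrive at mtk_sha_setkey() through the generic
 * ahash API. Per RFC 2104, keys longer than the block size are first
 * digested down to ds bytes before the ipad/opad XOR above.
 */
static inline int mtk_sha_example_hmac_key(const u8 *key, unsigned int keylen)
{
        struct crypto_ahash *tfm;
        int err;

        tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Dispatches to mtk_sha_setkey() when this driver is selected */
        err = crypto_ahash_setkey(tfm, key, keylen);
        crypto_free_ahash(tfm);
        return err;
}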

static int mtk_sha_export(struct ahash_request *req, void *out)
{
        const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(out, ctx, sizeof(*ctx));
        return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(ctx, in, sizeof(*ctx));
        return 0;
}

static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
                                const char *alg_base)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
        struct mtk_cryp *cryp = NULL;

        cryp = mtk_sha_find_dev(tctx);
        if (!cryp)
                return -ENODEV;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mtk_sha_reqctx));

        if (alg_base) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                tctx->flags |= SHA_FLAGS_HMAC;
                bctx->shash = crypto_alloc_shash(alg_base, 0,
                                        CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(bctx->shash)) {
                        pr_err("base driver %s could not be loaded.\n",
                               alg_base);

                        return PTR_ERR(bctx->shash);
                }
        }
        return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
        return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

        if (tctx->flags & SHA_FLAGS_HMAC) {
                struct mtk_sha_hmac_ctx *bctx = tctx->base;

                crypto_free_shash(bctx->shash);
        }
}

static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "mtk-sha1",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA224_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha224",
                .cra_driver_name        = "mtk-sha224",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "mtk-sha256",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha1)",
                .cra_driver_name        = "mtk-hmac-sha1",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha1_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA224_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha224)",
                .cra_driver_name        = "mtk-hmac-sha224",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA224_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha224_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha256)",
                .cra_driver_name        = "mtk-hmac-sha256",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha256_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA384_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha384",
                .cra_driver_name        = "mtk-sha384",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .halg.digestsize        = SHA512_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha512",
                .cra_driver_name        = "mtk-sha512",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA384_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha384)",
                .cra_driver_name        = "mtk-hmac-sha384",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA384_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha384_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
{
        .init           = mtk_sha_init,
        .update         = mtk_sha_update,
        .final          = mtk_sha_final,
        .finup          = mtk_sha_finup,
        .digest         = mtk_sha_digest,
        .export         = mtk_sha_export,
        .import         = mtk_sha_import,
        .setkey         = mtk_sha_setkey,
        .halg.digestsize        = SHA512_DIGEST_SIZE,
        .halg.statesize = sizeof(struct mtk_sha_reqctx),
        .halg.base      = {
                .cra_name               = "hmac(sha512)",
                .cra_driver_name        = "mtk-hmac-sha512",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA512_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
                                        sizeof(struct mtk_sha_hmac_ctx),
                .cra_alignmask          = SHA_ALIGN_MSK,
                .cra_module             = THIS_MODULE,
                .cra_init               = mtk_sha_cra_sha512_init,
                .cra_exit               = mtk_sha_cra_exit,
        }
},
};

static void mtk_sha_queue_task(unsigned long data)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;

        mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}

static void mtk_sha_done_task(unsigned long data)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
        struct mtk_cryp *cryp = sha->cryp;

        mtk_sha_unmap(cryp, sha);
        mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
        struct mtk_cryp *cryp = sha->cryp;
        u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

        mtk_sha_write(cryp, RDR_STAT(sha->id), val);

        if (likely((SHA_FLAGS_BUSY & sha->flags))) {
                mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
                mtk_sha_write(cryp, RDR_THRESH(sha->id),
                              MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

                tasklet_schedule(&sha->done_task);
        } else {
                dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
        }
        return IRQ_HANDLED;
}
/*
 * Two SHA records are used to get extra performance. This is similar
 * to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
        struct mtk_sha_rec **sha = cryp->sha;
        int i, err = -ENOMEM;

        for (i = 0; i < MTK_REC_NUM; i++) {
                sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
                if (!sha[i])
                        goto err_cleanup;

                sha[i]->cryp = cryp;

                spin_lock_init(&sha[i]->lock);
                crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);

                tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
                             (unsigned long)sha[i]);
                tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
                             (unsigned long)sha[i]);
        }

        /* Link to ring2 and ring3 respectively */
        sha[0]->id = MTK_RING2;
        sha[1]->id = MTK_RING3;

        cryp->rec = 1;

        return 0;

err_cleanup:
        for (; i--; )
                kfree(sha[i]);
        return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
        int i;

        for (i = 0; i < MTK_REC_NUM; i++) {
                tasklet_kill(&cryp->sha[i]->done_task);
                tasklet_kill(&cryp->sha[i]->queue_task);

                kfree(cryp->sha[i]);
        }
}

static void mtk_sha_unregister_algs(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
                crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
                err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
                if (err)
                        goto err_sha_224_256_algs;
        }

        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
                err = crypto_register_ahash(&algs_sha384_sha512[i]);
                if (err)
                        goto err_sha_384_512_algs;
        }

        return 0;

err_sha_384_512_algs:
        for (; i--; )
                crypto_unregister_ahash(&algs_sha384_sha512[i]);
        i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
        for (; i--; )
                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

        return err;
}

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
        int err;

        INIT_LIST_HEAD(&cryp->sha_list);

        /* Initialize two hash records */
        err = mtk_sha_record_init(cryp);
        if (err)
                goto err_record;

        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
                               0, "mtk-sha", cryp->sha[0]);
        if (err) {
                dev_err(cryp->dev, "unable to request sha irq0.\n");
                goto err_res;
        }

        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
                               0, "mtk-sha", cryp->sha[1]);
        if (err) {
                dev_err(cryp->dev, "unable to request sha irq1.\n");
                goto err_res;
        }

        /* Enable ring2 and ring3 interrupt for hash */
        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);

        spin_lock(&mtk_sha.lock);
        list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
        spin_unlock(&mtk_sha.lock);

        err = mtk_sha_register_algs();
        if (err)
                goto err_algs;

        return 0;

err_algs:
        spin_lock(&mtk_sha.lock);
        list_del(&cryp->sha_list);
        spin_unlock(&mtk_sha.lock);
err_res:
        mtk_sha_record_free(cryp);
err_record:
        dev_err(cryp->dev, "mtk-sha initialization failed.\n");
        return err;
}

void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
        spin_lock(&mtk_sha.lock);
        list_del(&cryp->sha_list);
        spin_unlock(&mtk_sha.lock);

        mtk_sha_unregister_algs();
        mtk_sha_record_free(cryp);
}