linux/drivers/crypto/mediatek/mtk-sha.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
   6 *
   7 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
   8 *
   9 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
  10 */
  11
  12#include <crypto/hmac.h>
  13#include <crypto/sha.h>
  14#include "mtk-platform.h"
  15
  16#define SHA_ALIGN_MSK           (sizeof(u32) - 1)
  17#define SHA_QUEUE_SIZE          512
  18#define SHA_BUF_SIZE            ((u32)PAGE_SIZE)
  19
  20#define SHA_OP_UPDATE           1
  21#define SHA_OP_FINAL            2
  22
  23#define SHA_DATA_LEN_MSK        cpu_to_le32(GENMASK(16, 0))
  24#define SHA_MAX_DIGEST_BUF_SIZE 32
  25
  26/* SHA command token */
  27#define SHA_CT_SIZE             5
  28#define SHA_CT_CTRL_HDR         cpu_to_le32(0x02220000)
  29#define SHA_CMD0                cpu_to_le32(0x03020000)
  30#define SHA_CMD1                cpu_to_le32(0x21060000)
  31#define SHA_CMD2                cpu_to_le32(0xe0e63802)
  32
  33/* SHA transform information */
  34#define SHA_TFM_HASH            cpu_to_le32(0x2 << 0)
  35#define SHA_TFM_SIZE(x)         cpu_to_le32((x) << 8)
  36#define SHA_TFM_START           cpu_to_le32(0x1 << 4)
  37#define SHA_TFM_CONTINUE        cpu_to_le32(0x1 << 5)
  38#define SHA_TFM_HASH_STORE      cpu_to_le32(0x1 << 19)
  39#define SHA_TFM_SHA1            cpu_to_le32(0x2 << 23)
  40#define SHA_TFM_SHA256          cpu_to_le32(0x3 << 23)
  41#define SHA_TFM_SHA224          cpu_to_le32(0x4 << 23)
  42#define SHA_TFM_SHA512          cpu_to_le32(0x5 << 23)
  43#define SHA_TFM_SHA384          cpu_to_le32(0x6 << 23)
  44#define SHA_TFM_DIGEST(x)       cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
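/*
 * Illustrative example (not driver code): for SHA-256 the digest is
 * 32 bytes, i.e. 8 words, assuming SIZE_IN_WORDS() converts a byte
 * count to 32-bit words. The first transform word is then
 * SHA_TFM_HASH | SHA_TFM_SIZE(8) | SHA_TFM_SHA256
 * = 0x2 | (8 << 8) | (0x3 << 23) = 0x01800802 (as a LE word).
 */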
  45
  46/* SHA flags */
  47#define SHA_FLAGS_BUSY          BIT(0)
  48#define SHA_FLAGS_FINAL         BIT(1)
  49#define SHA_FLAGS_FINUP         BIT(2)
  50#define SHA_FLAGS_SG            BIT(3)
  51#define SHA_FLAGS_ALGO_MSK      GENMASK(8, 4)
  52#define SHA_FLAGS_SHA1          BIT(4)
  53#define SHA_FLAGS_SHA224        BIT(5)
  54#define SHA_FLAGS_SHA256        BIT(6)
  55#define SHA_FLAGS_SHA384        BIT(7)
  56#define SHA_FLAGS_SHA512        BIT(8)
  57#define SHA_FLAGS_HMAC          BIT(9)
  58#define SHA_FLAGS_PAD           BIT(10)
  59
   60/**
   61 * struct mtk_sha_info - hardware information of SHA
   62 * @ctrl:       control tokens of the transform
   63 * @cmd:        command token, hardware instruction
   64 * @tfm:        transform state of the hash algorithm
   65 * @digest:     buffer holding digests, keys and initial vectors
   66 */
  67struct mtk_sha_info {
  68        __le32 ctrl[2];
  69        __le32 cmd[3];
  70        __le32 tfm[2];
  71        __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
  72};
  73
  74struct mtk_sha_reqctx {
  75        struct mtk_sha_info info;
  76        unsigned long flags;
  77        unsigned long op;
  78
  79        u64 digcnt;
  80        size_t bufcnt;
  81        dma_addr_t dma_addr;
  82
  83        __le32 ct_hdr;
  84        u32 ct_size;
  85        dma_addr_t ct_dma;
  86        dma_addr_t tfm_dma;
  87
  88        /* Walk state */
  89        struct scatterlist *sg;
  90        u32 offset;     /* Offset in current sg */
  91        u32 total;      /* Total request */
  92        size_t ds;
  93        size_t bs;
  94
  95        u8 *buffer;
  96};
  97
  98struct mtk_sha_hmac_ctx {
  99        struct crypto_shash     *shash;
 100        u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 101        u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 102};
 103
 104struct mtk_sha_ctx {
 105        struct mtk_cryp *cryp;
 106        unsigned long flags;
 107        u8 id;
 108        u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
 109
  110        struct mtk_sha_hmac_ctx base[];
 111};
 112
 113struct mtk_sha_drv {
 114        struct list_head dev_list;
 115        /* Device list lock */
 116        spinlock_t lock;
 117};
 118
 119static struct mtk_sha_drv mtk_sha = {
 120        .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
 121        .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
 122};
 123
 124static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
 125                                struct ahash_request *req);
 126
 127static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
 128{
 129        return readl_relaxed(cryp->base + offset);
 130}
 131
 132static inline void mtk_sha_write(struct mtk_cryp *cryp,
 133                                 u32 offset, u32 value)
 134{
 135        writel_relaxed(value, cryp->base + offset);
 136}
 137
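/*
 * Claim the next command/result descriptor pair. Both ring pointers
 * advance in lockstep and wrap around once MTK_DESC_NUM entries have
 * been consumed.
 */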
 138static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
 139                                      struct mtk_desc **cmd_curr,
 140                                      struct mtk_desc **res_curr,
 141                                      int *count)
 142{
 143        *cmd_curr = ring->cmd_next++;
 144        *res_curr = ring->res_next++;
 145        (*count)++;
 146
 147        if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
 148                ring->cmd_next = ring->cmd_base;
 149                ring->res_next = ring->res_base;
 150        }
 151}
 152
 153static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
 154{
 155        struct mtk_cryp *cryp = NULL;
 156        struct mtk_cryp *tmp;
 157
 158        spin_lock_bh(&mtk_sha.lock);
 159        if (!tctx->cryp) {
 160                list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
 161                        cryp = tmp;
 162                        break;
 163                }
 164                tctx->cryp = cryp;
 165        } else {
 166                cryp = tctx->cryp;
 167        }
 168
  169        /*
  170         * Assign a record id to the tfm in round-robin fashion so that
  171         * the tfm binds to the corresponding descriptor ring.
  172         */
 173        tctx->id = cryp->rec;
 174        cryp->rec = !cryp->rec;
 175
 176        spin_unlock_bh(&mtk_sha.lock);
 177
 178        return cryp;
 179}
 180
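/*
 * Copy data from the scatterlist walk into the request's bounce
 * buffer, advancing sg/offset/total, until the buffer is full or the
 * request data is exhausted.
 */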
 181static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
 182{
 183        size_t count;
 184
 185        while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
 186                count = min(ctx->sg->length - ctx->offset, ctx->total);
 187                count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
 188
 189                if (count <= 0) {
  190                        /*
  191                         * count can be <= 0 when the buffer is full or when
  192                         * the sg length is 0. In the latter case, check for
  193                         * another sg in the list; a zero-length sg doesn't
  194                         * necessarily mean the end of the sg list.
  195                         */
 196                        if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
 197                                ctx->sg = sg_next(ctx->sg);
 198                                continue;
 199                        } else {
 200                                break;
 201                        }
 202                }
 203
 204                scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
 205                                         ctx->offset, count, 0);
 206
 207                ctx->bufcnt += count;
 208                ctx->offset += count;
 209                ctx->total -= count;
 210
 211                if (ctx->offset == ctx->sg->length) {
 212                        ctx->sg = sg_next(ctx->sg);
 213                        if (ctx->sg)
 214                                ctx->offset = 0;
 215                        else
 216                                ctx->total = 0;
 217                }
 218        }
 219
 220        return 0;
 221}
 222
  223/*
  224 * The purpose of this padding is to ensure that the padded message is a
  225 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
  226 * The bit "1" is appended at the end of the message, followed by
  227 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
  228 * 128-bit block (SHA384/SHA512) equal to the message length in bits
  229 * is appended.
  230 *
  231 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
  232 *  - if message length < 56 bytes then padlen = 56 - message length
  233 *  - else padlen = 64 + 56 - message length
  234 *
  235 * For SHA384/SHA512, padlen is calculated as follows:
  236 *  - if message length < 112 bytes then padlen = 112 - message length
  237 *  - else padlen = 128 + 112 - message length
  238 */
 239static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
 240{
 241        u32 index, padlen;
 242        u64 bits[2];
 243        u64 size = ctx->digcnt;
 244
 245        size += ctx->bufcnt;
 246        size += len;
 247
 248        bits[1] = cpu_to_be64(size << 3);
 249        bits[0] = cpu_to_be64(size >> 61);
 250
 251        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
 252        case SHA_FLAGS_SHA384:
 253        case SHA_FLAGS_SHA512:
 254                index = ctx->bufcnt & 0x7f;
 255                padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
 256                *(ctx->buffer + ctx->bufcnt) = 0x80;
 257                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
 258                memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
 259                ctx->bufcnt += padlen + 16;
 260                ctx->flags |= SHA_FLAGS_PAD;
 261                break;
 262
 263        default:
 264                index = ctx->bufcnt & 0x3f;
 265                padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
 266                *(ctx->buffer + ctx->bufcnt) = 0x80;
 267                memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
 268                memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
 269                ctx->bufcnt += padlen + 8;
 270                ctx->flags |= SHA_FLAGS_PAD;
 271                break;
 272        }
 273}
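/*
 * Worked example (illustrative): hashing 20 bytes with SHA-256 gives
 * index = 20 and padlen = 56 - 20 = 36. The buffer then receives 0x80,
 * 35 zero bytes and the 8-byte big-endian bit length (20 * 8 = 160),
 * so bufcnt grows by 36 + 8 = 44 bytes and 20 + 44 = 64 bytes form one
 * complete SHA-256 block.
 */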
 274
 275/* Initialize basic transform information of SHA */
 276static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
 277{
 278        struct mtk_sha_info *info = &ctx->info;
 279
 280        ctx->ct_hdr = SHA_CT_CTRL_HDR;
 281        ctx->ct_size = SHA_CT_SIZE;
 282
 283        info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
 284
 285        switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
 286        case SHA_FLAGS_SHA1:
 287                info->tfm[0] |= SHA_TFM_SHA1;
 288                break;
 289        case SHA_FLAGS_SHA224:
 290                info->tfm[0] |= SHA_TFM_SHA224;
 291                break;
 292        case SHA_FLAGS_SHA256:
 293                info->tfm[0] |= SHA_TFM_SHA256;
 294                break;
 295        case SHA_FLAGS_SHA384:
 296                info->tfm[0] |= SHA_TFM_SHA384;
 297                break;
 298        case SHA_FLAGS_SHA512:
 299                info->tfm[0] |= SHA_TFM_SHA512;
 300                break;
 301
 302        default:
 303                /* Should not happen... */
 304                return;
 305        }
 306
 307        info->tfm[1] = SHA_TFM_HASH_STORE;
 308        info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
 309        info->ctrl[1] = info->tfm[1];
 310
 311        info->cmd[0] = SHA_CMD0;
 312        info->cmd[1] = SHA_CMD1;
 313        info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
 314}
 315
  316/*
  317 * Update the input data length field of the transform information
  318 * and map it to the DMA region.
  319 */
 320static int mtk_sha_info_update(struct mtk_cryp *cryp,
 321                               struct mtk_sha_rec *sha,
 322                               size_t len1, size_t len2)
 323{
 324        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 325        struct mtk_sha_info *info = &ctx->info;
 326
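        /*
         * The command header and the first command token carry the
         * transfer byte count in their low 17 bits (SHA_DATA_LEN_MSK),
         * i.e. at most 128 KiB - 1 bytes per command.
         */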
 327        ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
 328        ctx->ct_hdr |= cpu_to_le32(len1 + len2);
 329        info->cmd[0] &= ~SHA_DATA_LEN_MSK;
 330        info->cmd[0] |= cpu_to_le32(len1 + len2);
 331
  332        /* Set SHA_TFM_START only for the first iteration */
 333        if (ctx->digcnt)
 334                info->ctrl[0] &= ~SHA_TFM_START;
 335
 336        ctx->digcnt += len1;
 337
 338        ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
 339                                     DMA_BIDIRECTIONAL);
 340        if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
 341                dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
 342                return -EINVAL;
 343        }
 344
 345        ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
 346
 347        return 0;
 348}
 349
  350/*
  351 * Because of a hardware limitation, the engine computes only the inner
  352 * digest; the outer digest must then be applied to that result. This
  353 * extra hashing step limits HMAC performance, so the outer hash is
  354 * computed with a software fallback.
  355 */
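/*
 * In HMAC terms: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 * mtk_sha_init() preloads the ipad block into the buffer, so the
 * engine has already produced the inner hash in req->result; only the
 * outer hash over opad || inner digest is computed in software below.
 */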
 356static int mtk_sha_finish_hmac(struct ahash_request *req)
 357{
 358        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 359        struct mtk_sha_hmac_ctx *bctx = tctx->base;
 360        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 361
 362        SHASH_DESC_ON_STACK(shash, bctx->shash);
 363
 364        shash->tfm = bctx->shash;
 365
 366        return crypto_shash_init(shash) ?:
 367               crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
 368               crypto_shash_finup(shash, req->result, ctx->ds, req->result);
 369}
 370
 371/* Initialize request context */
 372static int mtk_sha_init(struct ahash_request *req)
 373{
 374        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 375        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 376        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 377
 378        ctx->flags = 0;
 379        ctx->ds = crypto_ahash_digestsize(tfm);
 380
 381        switch (ctx->ds) {
 382        case SHA1_DIGEST_SIZE:
 383                ctx->flags |= SHA_FLAGS_SHA1;
 384                ctx->bs = SHA1_BLOCK_SIZE;
 385                break;
 386        case SHA224_DIGEST_SIZE:
 387                ctx->flags |= SHA_FLAGS_SHA224;
 388                ctx->bs = SHA224_BLOCK_SIZE;
 389                break;
 390        case SHA256_DIGEST_SIZE:
 391                ctx->flags |= SHA_FLAGS_SHA256;
 392                ctx->bs = SHA256_BLOCK_SIZE;
 393                break;
 394        case SHA384_DIGEST_SIZE:
 395                ctx->flags |= SHA_FLAGS_SHA384;
 396                ctx->bs = SHA384_BLOCK_SIZE;
 397                break;
 398        case SHA512_DIGEST_SIZE:
 399                ctx->flags |= SHA_FLAGS_SHA512;
 400                ctx->bs = SHA512_BLOCK_SIZE;
 401                break;
 402        default:
 403                return -EINVAL;
 404        }
 405
 406        ctx->bufcnt = 0;
 407        ctx->digcnt = 0;
 408        ctx->buffer = tctx->buf;
 409
 410        if (tctx->flags & SHA_FLAGS_HMAC) {
 411                struct mtk_sha_hmac_ctx *bctx = tctx->base;
 412
 413                memcpy(ctx->buffer, bctx->ipad, ctx->bs);
 414                ctx->bufcnt = ctx->bs;
 415                ctx->flags |= SHA_FLAGS_HMAC;
 416        }
 417
 418        return 0;
 419}
 420
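/*
 * Queue one or two data fragments to the engine: a second descriptor
 * pair is chained when a buffered tail (addr2/len2) follows the
 * scatterlist data. Writing the PREP_COUNT registers hands the
 * prepared descriptors over to the hardware.
 */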
 421static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
 422                        dma_addr_t addr1, size_t len1,
 423                        dma_addr_t addr2, size_t len2)
 424{
 425        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 426        struct mtk_ring *ring = cryp->ring[sha->id];
 427        struct mtk_desc *cmd, *res;
 428        int err, count = 0;
 429
 430        err = mtk_sha_info_update(cryp, sha, len1, len2);
 431        if (err)
 432                return err;
 433
 434        /* Fill in the command/result descriptors */
 435        mtk_sha_ring_shift(ring, &cmd, &res, &count);
 436
 437        res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
 438        cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
 439                   MTK_DESC_CT_LEN(ctx->ct_size);
 440        cmd->buf = cpu_to_le32(addr1);
 441        cmd->ct = cpu_to_le32(ctx->ct_dma);
 442        cmd->ct_hdr = ctx->ct_hdr;
 443        cmd->tfm = cpu_to_le32(ctx->tfm_dma);
 444
 445        if (len2) {
 446                mtk_sha_ring_shift(ring, &cmd, &res, &count);
 447
 448                res->hdr = MTK_DESC_BUF_LEN(len2);
 449                cmd->hdr = MTK_DESC_BUF_LEN(len2);
 450                cmd->buf = cpu_to_le32(addr2);
 451        }
 452
 453        cmd->hdr |= MTK_DESC_LAST;
 454        res->hdr |= MTK_DESC_LAST;
 455
  456        /*
  457         * Make sure that all changes to the DMA ring are done before we
  458         * start the engine.
  459         */
 460        wmb();
 461        /* Start DMA transfer */
 462        mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
 463        mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
 464
 465        return -EINPROGRESS;
 466}
 467
 468static int mtk_sha_dma_map(struct mtk_cryp *cryp,
 469                           struct mtk_sha_rec *sha,
 470                           struct mtk_sha_reqctx *ctx,
 471                           size_t count)
 472{
 473        ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
 474                                       SHA_BUF_SIZE, DMA_TO_DEVICE);
 475        if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 476                dev_err(cryp->dev, "dma map error\n");
 477                return -EINVAL;
 478        }
 479
 480        ctx->flags &= ~SHA_FLAGS_SG;
 481
 482        return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
 483}
 484
 485static int mtk_sha_update_slow(struct mtk_cryp *cryp,
 486                               struct mtk_sha_rec *sha)
 487{
 488        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 489        size_t count;
 490        u32 final;
 491
 492        mtk_sha_append_sg(ctx);
 493
 494        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 495
 496        dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
 497
 498        if (final) {
 499                sha->flags |= SHA_FLAGS_FINAL;
 500                mtk_sha_fill_padding(ctx, 0);
 501        }
 502
 503        if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
 504                count = ctx->bufcnt;
 505                ctx->bufcnt = 0;
 506
 507                return mtk_sha_dma_map(cryp, sha, ctx, count);
 508        }
 509        return 0;
 510}
 511
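/*
 * Fast/slow path selection: data is DMA-mapped straight from the
 * scatterlist only when the sg offset is 32-bit aligned and every
 * non-final sg length is block aligned; anything else goes through
 * mtk_sha_update_slow() and the bounce buffer.
 */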
 512static int mtk_sha_update_start(struct mtk_cryp *cryp,
 513                                struct mtk_sha_rec *sha)
 514{
 515        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 516        u32 len, final, tail;
 517        struct scatterlist *sg;
 518
 519        if (!ctx->total)
 520                return 0;
 521
 522        if (ctx->bufcnt || ctx->offset)
 523                return mtk_sha_update_slow(cryp, sha);
 524
 525        sg = ctx->sg;
 526
 527        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 528                return mtk_sha_update_slow(cryp, sha);
 529
 530        if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
 531                /* size is not ctx->bs aligned */
 532                return mtk_sha_update_slow(cryp, sha);
 533
 534        len = min(ctx->total, sg->length);
 535
 536        if (sg_is_last(sg)) {
 537                if (!(ctx->flags & SHA_FLAGS_FINUP)) {
  538                        /* not the final update: keep data ctx->bs aligned */
 539                        tail = len & (ctx->bs - 1);
 540                        len -= tail;
 541                }
 542        }
 543
 544        ctx->total -= len;
 545        ctx->offset = len; /* offset where to start slow */
 546
 547        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 548
 549        /* Add padding */
 550        if (final) {
 551                size_t count;
 552
 553                tail = len & (ctx->bs - 1);
 554                len -= tail;
 555                ctx->total += tail;
 556                ctx->offset = len; /* offset where to start slow */
 557
 558                sg = ctx->sg;
 559                mtk_sha_append_sg(ctx);
 560                mtk_sha_fill_padding(ctx, len);
 561
 562                ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
 563                                               SHA_BUF_SIZE, DMA_TO_DEVICE);
 564                if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 565                        dev_err(cryp->dev, "dma map bytes error\n");
 566                        return -EINVAL;
 567                }
 568
 569                sha->flags |= SHA_FLAGS_FINAL;
 570                count = ctx->bufcnt;
 571                ctx->bufcnt = 0;
 572
 573                if (len == 0) {
 574                        ctx->flags &= ~SHA_FLAGS_SG;
 575                        return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
 576                                            count, 0, 0);
 577
 578                } else {
 579                        ctx->sg = sg;
 580                        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 581                                dev_err(cryp->dev, "dma_map_sg error\n");
 582                                return -EINVAL;
 583                        }
 584
 585                        ctx->flags |= SHA_FLAGS_SG;
 586                        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
 587                                            len, ctx->dma_addr, count);
 588                }
 589        }
 590
 591        if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
  592                dev_err(cryp->dev, "dma_map_sg error\n");
 593                return -EINVAL;
 594        }
 595
 596        ctx->flags |= SHA_FLAGS_SG;
 597
 598        return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
 599                            len, 0, 0);
 600}
 601
 602static int mtk_sha_final_req(struct mtk_cryp *cryp,
 603                             struct mtk_sha_rec *sha)
 604{
 605        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 606        size_t count;
 607
 608        mtk_sha_fill_padding(ctx, 0);
 609
 610        sha->flags |= SHA_FLAGS_FINAL;
 611        count = ctx->bufcnt;
 612        ctx->bufcnt = 0;
 613
 614        return mtk_sha_dma_map(cryp, sha, ctx, count);
 615}
 616
  617/* Copy out the ready hash (and finalize HMAC) */
 618static int mtk_sha_finish(struct ahash_request *req)
 619{
 620        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 621        __le32 *digest = ctx->info.digest;
 622        u32 *result = (u32 *)req->result;
 623        int i;
 624
 625        /* Get the hash from the digest buffer */
 626        for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
 627                result[i] = le32_to_cpu(digest[i]);
 628
 629        if (ctx->flags & SHA_FLAGS_HMAC)
 630                return mtk_sha_finish_hmac(req);
 631
 632        return 0;
 633}
 634
 635static void mtk_sha_finish_req(struct mtk_cryp *cryp,
 636                               struct mtk_sha_rec *sha,
 637                               int err)
 638{
 639        if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
 640                err = mtk_sha_finish(sha->req);
 641
 642        sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
 643
 644        sha->req->base.complete(&sha->req->base, err);
 645
 646        /* Handle new request */
 647        tasklet_schedule(&sha->queue_task);
 648}
 649
 650static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
 651                                struct ahash_request *req)
 652{
 653        struct mtk_sha_rec *sha = cryp->sha[id];
 654        struct crypto_async_request *async_req, *backlog;
 655        struct mtk_sha_reqctx *ctx;
 656        unsigned long flags;
 657        int err = 0, ret = 0;
 658
 659        spin_lock_irqsave(&sha->lock, flags);
 660        if (req)
 661                ret = ahash_enqueue_request(&sha->queue, req);
 662
 663        if (SHA_FLAGS_BUSY & sha->flags) {
 664                spin_unlock_irqrestore(&sha->lock, flags);
 665                return ret;
 666        }
 667
 668        backlog = crypto_get_backlog(&sha->queue);
 669        async_req = crypto_dequeue_request(&sha->queue);
 670        if (async_req)
 671                sha->flags |= SHA_FLAGS_BUSY;
 672        spin_unlock_irqrestore(&sha->lock, flags);
 673
 674        if (!async_req)
 675                return ret;
 676
 677        if (backlog)
 678                backlog->complete(backlog, -EINPROGRESS);
 679
 680        req = ahash_request_cast(async_req);
 681        ctx = ahash_request_ctx(req);
 682
 683        sha->req = req;
 684
 685        mtk_sha_info_init(ctx);
 686
 687        if (ctx->op == SHA_OP_UPDATE) {
 688                err = mtk_sha_update_start(cryp, sha);
 689                if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
 690                        /* No final() after finup() */
 691                        err = mtk_sha_final_req(cryp, sha);
 692        } else if (ctx->op == SHA_OP_FINAL) {
 693                err = mtk_sha_final_req(cryp, sha);
 694        }
 695
 696        if (unlikely(err != -EINPROGRESS))
 697                /* Task will not finish it, so do it here */
 698                mtk_sha_finish_req(cryp, sha, err);
 699
 700        return ret;
 701}
 702
 703static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
 704{
 705        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 706        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 707
 708        ctx->op = op;
 709
 710        return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
 711}
 712
 713static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
 714{
 715        struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 716
 717        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
 718                         DMA_BIDIRECTIONAL);
 719
 720        if (ctx->flags & SHA_FLAGS_SG) {
 721                dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
 722                if (ctx->sg->length == ctx->offset) {
 723                        ctx->sg = sg_next(ctx->sg);
 724                        if (ctx->sg)
 725                                ctx->offset = 0;
 726                }
 727                if (ctx->flags & SHA_FLAGS_PAD) {
 728                        dma_unmap_single(cryp->dev, ctx->dma_addr,
 729                                         SHA_BUF_SIZE, DMA_TO_DEVICE);
 730                }
 731        } else
 732                dma_unmap_single(cryp->dev, ctx->dma_addr,
 733                                 SHA_BUF_SIZE, DMA_TO_DEVICE);
 734}
 735
 736static void mtk_sha_complete(struct mtk_cryp *cryp,
 737                             struct mtk_sha_rec *sha)
 738{
 739        int err = 0;
 740
 741        err = mtk_sha_update_start(cryp, sha);
 742        if (err != -EINPROGRESS)
 743                mtk_sha_finish_req(cryp, sha, err);
 744}
 745
 746static int mtk_sha_update(struct ahash_request *req)
 747{
 748        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 749
 750        ctx->total = req->nbytes;
 751        ctx->sg = req->src;
 752        ctx->offset = 0;
 753
 754        if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
 755            !(ctx->flags & SHA_FLAGS_FINUP))
 756                return mtk_sha_append_sg(ctx);
 757
 758        return mtk_sha_enqueue(req, SHA_OP_UPDATE);
 759}
 760
 761static int mtk_sha_final(struct ahash_request *req)
 762{
 763        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 764
 765        ctx->flags |= SHA_FLAGS_FINUP;
 766
 767        if (ctx->flags & SHA_FLAGS_PAD)
 768                return mtk_sha_finish(req);
 769
 770        return mtk_sha_enqueue(req, SHA_OP_FINAL);
 771}
 772
 773static int mtk_sha_finup(struct ahash_request *req)
 774{
 775        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 776        int err1, err2;
 777
 778        ctx->flags |= SHA_FLAGS_FINUP;
 779
 780        err1 = mtk_sha_update(req);
 781        if (err1 == -EINPROGRESS || err1 == -EBUSY)
 782                return err1;
  783        /*
  784         * final() always has to be called to clean up resources,
  785         * even if update() failed.
  786         */
 787        err2 = mtk_sha_final(req);
 788
 789        return err1 ?: err2;
 790}
 791
 792static int mtk_sha_digest(struct ahash_request *req)
 793{
 794        return mtk_sha_init(req) ?: mtk_sha_finup(req);
 795}
 796
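/*
 * Prepare the HMAC pads as per RFC 2104: a key longer than the block
 * size is first replaced by its digest, the key is zero-padded to the
 * block size, and ipad/opad are the padded key XORed with the 0x36 and
 * 0x5c constants respectively.
 */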
 797static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 798                          u32 keylen)
 799{
 800        struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 801        struct mtk_sha_hmac_ctx *bctx = tctx->base;
 802        size_t bs = crypto_shash_blocksize(bctx->shash);
 803        size_t ds = crypto_shash_digestsize(bctx->shash);
 804        int err, i;
 805
 806        SHASH_DESC_ON_STACK(shash, bctx->shash);
 807
 808        shash->tfm = bctx->shash;
 809
 810        if (keylen > bs) {
 811                err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
 812                if (err)
 813                        return err;
 814                keylen = ds;
 815        } else {
 816                memcpy(bctx->ipad, key, keylen);
 817        }
 818
 819        memset(bctx->ipad + keylen, 0, bs - keylen);
 820        memcpy(bctx->opad, bctx->ipad, bs);
 821
 822        for (i = 0; i < bs; i++) {
 823                bctx->ipad[i] ^= HMAC_IPAD_VALUE;
 824                bctx->opad[i] ^= HMAC_OPAD_VALUE;
 825        }
 826
 827        return 0;
 828}
 829
 830static int mtk_sha_export(struct ahash_request *req, void *out)
 831{
 832        const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 833
 834        memcpy(out, ctx, sizeof(*ctx));
 835        return 0;
 836}
 837
 838static int mtk_sha_import(struct ahash_request *req, const void *in)
 839{
 840        struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 841
 842        memcpy(ctx, in, sizeof(*ctx));
 843        return 0;
 844}
 845
 846static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
 847                                const char *alg_base)
 848{
 849        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
 850        struct mtk_cryp *cryp = NULL;
 851
 852        cryp = mtk_sha_find_dev(tctx);
 853        if (!cryp)
 854                return -ENODEV;
 855
 856        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 857                                 sizeof(struct mtk_sha_reqctx));
 858
 859        if (alg_base) {
 860                struct mtk_sha_hmac_ctx *bctx = tctx->base;
 861
 862                tctx->flags |= SHA_FLAGS_HMAC;
 863                bctx->shash = crypto_alloc_shash(alg_base, 0,
 864                                        CRYPTO_ALG_NEED_FALLBACK);
 865                if (IS_ERR(bctx->shash)) {
 866                        pr_err("base driver %s could not be loaded.\n",
 867                               alg_base);
 868
 869                        return PTR_ERR(bctx->shash);
 870                }
 871        }
 872        return 0;
 873}
 874
 875static int mtk_sha_cra_init(struct crypto_tfm *tfm)
 876{
 877        return mtk_sha_cra_init_alg(tfm, NULL);
 878}
 879
 880static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
 881{
 882        return mtk_sha_cra_init_alg(tfm, "sha1");
 883}
 884
 885static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
 886{
 887        return mtk_sha_cra_init_alg(tfm, "sha224");
 888}
 889
 890static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
 891{
 892        return mtk_sha_cra_init_alg(tfm, "sha256");
 893}
 894
 895static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
 896{
 897        return mtk_sha_cra_init_alg(tfm, "sha384");
 898}
 899
 900static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
 901{
 902        return mtk_sha_cra_init_alg(tfm, "sha512");
 903}
 904
 905static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
 906{
 907        struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
 908
 909        if (tctx->flags & SHA_FLAGS_HMAC) {
 910                struct mtk_sha_hmac_ctx *bctx = tctx->base;
 911
 912                crypto_free_shash(bctx->shash);
 913        }
 914}
 915
 916static struct ahash_alg algs_sha1_sha224_sha256[] = {
 917{
 918        .init           = mtk_sha_init,
 919        .update         = mtk_sha_update,
 920        .final          = mtk_sha_final,
 921        .finup          = mtk_sha_finup,
 922        .digest         = mtk_sha_digest,
 923        .export         = mtk_sha_export,
 924        .import         = mtk_sha_import,
 925        .halg.digestsize        = SHA1_DIGEST_SIZE,
 926        .halg.statesize = sizeof(struct mtk_sha_reqctx),
 927        .halg.base      = {
 928                .cra_name               = "sha1",
 929                .cra_driver_name        = "mtk-sha1",
 930                .cra_priority           = 400,
 931                .cra_flags              = CRYPTO_ALG_ASYNC,
 932                .cra_blocksize          = SHA1_BLOCK_SIZE,
 933                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 934                .cra_alignmask          = SHA_ALIGN_MSK,
 935                .cra_module             = THIS_MODULE,
 936                .cra_init               = mtk_sha_cra_init,
 937                .cra_exit               = mtk_sha_cra_exit,
 938        }
 939},
 940{
 941        .init           = mtk_sha_init,
 942        .update         = mtk_sha_update,
 943        .final          = mtk_sha_final,
 944        .finup          = mtk_sha_finup,
 945        .digest         = mtk_sha_digest,
 946        .export         = mtk_sha_export,
 947        .import         = mtk_sha_import,
 948        .halg.digestsize        = SHA224_DIGEST_SIZE,
 949        .halg.statesize = sizeof(struct mtk_sha_reqctx),
 950        .halg.base      = {
 951                .cra_name               = "sha224",
 952                .cra_driver_name        = "mtk-sha224",
 953                .cra_priority           = 400,
 954                .cra_flags              = CRYPTO_ALG_ASYNC,
 955                .cra_blocksize          = SHA224_BLOCK_SIZE,
 956                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 957                .cra_alignmask          = SHA_ALIGN_MSK,
 958                .cra_module             = THIS_MODULE,
 959                .cra_init               = mtk_sha_cra_init,
 960                .cra_exit               = mtk_sha_cra_exit,
 961        }
 962},
 963{
 964        .init           = mtk_sha_init,
 965        .update         = mtk_sha_update,
 966        .final          = mtk_sha_final,
 967        .finup          = mtk_sha_finup,
 968        .digest         = mtk_sha_digest,
 969        .export         = mtk_sha_export,
 970        .import         = mtk_sha_import,
 971        .halg.digestsize        = SHA256_DIGEST_SIZE,
 972        .halg.statesize = sizeof(struct mtk_sha_reqctx),
 973        .halg.base      = {
 974                .cra_name               = "sha256",
 975                .cra_driver_name        = "mtk-sha256",
 976                .cra_priority           = 400,
 977                .cra_flags              = CRYPTO_ALG_ASYNC,
 978                .cra_blocksize          = SHA256_BLOCK_SIZE,
 979                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 980                .cra_alignmask          = SHA_ALIGN_MSK,
 981                .cra_module             = THIS_MODULE,
 982                .cra_init               = mtk_sha_cra_init,
 983                .cra_exit               = mtk_sha_cra_exit,
 984        }
 985},
 986{
 987        .init           = mtk_sha_init,
 988        .update         = mtk_sha_update,
 989        .final          = mtk_sha_final,
 990        .finup          = mtk_sha_finup,
 991        .digest         = mtk_sha_digest,
 992        .export         = mtk_sha_export,
 993        .import         = mtk_sha_import,
 994        .setkey         = mtk_sha_setkey,
 995        .halg.digestsize        = SHA1_DIGEST_SIZE,
 996        .halg.statesize = sizeof(struct mtk_sha_reqctx),
 997        .halg.base      = {
 998                .cra_name               = "hmac(sha1)",
 999                .cra_driver_name        = "mtk-hmac-sha1",
1000                .cra_priority           = 400,
1001                .cra_flags              = CRYPTO_ALG_ASYNC |
1002                                          CRYPTO_ALG_NEED_FALLBACK,
1003                .cra_blocksize          = SHA1_BLOCK_SIZE,
1004                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1005                                        sizeof(struct mtk_sha_hmac_ctx),
1006                .cra_alignmask          = SHA_ALIGN_MSK,
1007                .cra_module             = THIS_MODULE,
1008                .cra_init               = mtk_sha_cra_sha1_init,
1009                .cra_exit               = mtk_sha_cra_exit,
1010        }
1011},
1012{
1013        .init           = mtk_sha_init,
1014        .update         = mtk_sha_update,
1015        .final          = mtk_sha_final,
1016        .finup          = mtk_sha_finup,
1017        .digest         = mtk_sha_digest,
1018        .export         = mtk_sha_export,
1019        .import         = mtk_sha_import,
1020        .setkey         = mtk_sha_setkey,
1021        .halg.digestsize        = SHA224_DIGEST_SIZE,
1022        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1023        .halg.base      = {
1024                .cra_name               = "hmac(sha224)",
1025                .cra_driver_name        = "mtk-hmac-sha224",
1026                .cra_priority           = 400,
1027                .cra_flags              = CRYPTO_ALG_ASYNC |
1028                                          CRYPTO_ALG_NEED_FALLBACK,
1029                .cra_blocksize          = SHA224_BLOCK_SIZE,
1030                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1031                                        sizeof(struct mtk_sha_hmac_ctx),
1032                .cra_alignmask          = SHA_ALIGN_MSK,
1033                .cra_module             = THIS_MODULE,
1034                .cra_init               = mtk_sha_cra_sha224_init,
1035                .cra_exit               = mtk_sha_cra_exit,
1036        }
1037},
1038{
1039        .init           = mtk_sha_init,
1040        .update         = mtk_sha_update,
1041        .final          = mtk_sha_final,
1042        .finup          = mtk_sha_finup,
1043        .digest         = mtk_sha_digest,
1044        .export         = mtk_sha_export,
1045        .import         = mtk_sha_import,
1046        .setkey         = mtk_sha_setkey,
1047        .halg.digestsize        = SHA256_DIGEST_SIZE,
1048        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1049        .halg.base      = {
1050                .cra_name               = "hmac(sha256)",
1051                .cra_driver_name        = "mtk-hmac-sha256",
1052                .cra_priority           = 400,
1053                .cra_flags              = CRYPTO_ALG_ASYNC |
1054                                          CRYPTO_ALG_NEED_FALLBACK,
1055                .cra_blocksize          = SHA256_BLOCK_SIZE,
1056                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1057                                        sizeof(struct mtk_sha_hmac_ctx),
1058                .cra_alignmask          = SHA_ALIGN_MSK,
1059                .cra_module             = THIS_MODULE,
1060                .cra_init               = mtk_sha_cra_sha256_init,
1061                .cra_exit               = mtk_sha_cra_exit,
1062        }
1063},
1064};
1065
1066static struct ahash_alg algs_sha384_sha512[] = {
1067{
1068        .init           = mtk_sha_init,
1069        .update         = mtk_sha_update,
1070        .final          = mtk_sha_final,
1071        .finup          = mtk_sha_finup,
1072        .digest         = mtk_sha_digest,
1073        .export         = mtk_sha_export,
1074        .import         = mtk_sha_import,
1075        .halg.digestsize        = SHA384_DIGEST_SIZE,
1076        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1077        .halg.base      = {
1078                .cra_name               = "sha384",
1079                .cra_driver_name        = "mtk-sha384",
1080                .cra_priority           = 400,
1081                .cra_flags              = CRYPTO_ALG_ASYNC,
1082                .cra_blocksize          = SHA384_BLOCK_SIZE,
1083                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
1084                .cra_alignmask          = SHA_ALIGN_MSK,
1085                .cra_module             = THIS_MODULE,
1086                .cra_init               = mtk_sha_cra_init,
1087                .cra_exit               = mtk_sha_cra_exit,
1088        }
1089},
1090{
1091        .init           = mtk_sha_init,
1092        .update         = mtk_sha_update,
1093        .final          = mtk_sha_final,
1094        .finup          = mtk_sha_finup,
1095        .digest         = mtk_sha_digest,
1096        .export         = mtk_sha_export,
1097        .import         = mtk_sha_import,
1098        .halg.digestsize        = SHA512_DIGEST_SIZE,
1099        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1100        .halg.base      = {
1101                .cra_name               = "sha512",
1102                .cra_driver_name        = "mtk-sha512",
1103                .cra_priority           = 400,
1104                .cra_flags              = CRYPTO_ALG_ASYNC,
1105                .cra_blocksize          = SHA512_BLOCK_SIZE,
1106                .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
1107                .cra_alignmask          = SHA_ALIGN_MSK,
1108                .cra_module             = THIS_MODULE,
1109                .cra_init               = mtk_sha_cra_init,
1110                .cra_exit               = mtk_sha_cra_exit,
1111        }
1112},
1113{
1114        .init           = mtk_sha_init,
1115        .update         = mtk_sha_update,
1116        .final          = mtk_sha_final,
1117        .finup          = mtk_sha_finup,
1118        .digest         = mtk_sha_digest,
1119        .export         = mtk_sha_export,
1120        .import         = mtk_sha_import,
1121        .setkey         = mtk_sha_setkey,
1122        .halg.digestsize        = SHA384_DIGEST_SIZE,
1123        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1124        .halg.base      = {
1125                .cra_name               = "hmac(sha384)",
1126                .cra_driver_name        = "mtk-hmac-sha384",
1127                .cra_priority           = 400,
1128                .cra_flags              = CRYPTO_ALG_ASYNC |
1129                                          CRYPTO_ALG_NEED_FALLBACK,
1130                .cra_blocksize          = SHA384_BLOCK_SIZE,
1131                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1132                                        sizeof(struct mtk_sha_hmac_ctx),
1133                .cra_alignmask          = SHA_ALIGN_MSK,
1134                .cra_module             = THIS_MODULE,
1135                .cra_init               = mtk_sha_cra_sha384_init,
1136                .cra_exit               = mtk_sha_cra_exit,
1137        }
1138},
1139{
1140        .init           = mtk_sha_init,
1141        .update         = mtk_sha_update,
1142        .final          = mtk_sha_final,
1143        .finup          = mtk_sha_finup,
1144        .digest         = mtk_sha_digest,
1145        .export         = mtk_sha_export,
1146        .import         = mtk_sha_import,
1147        .setkey         = mtk_sha_setkey,
1148        .halg.digestsize        = SHA512_DIGEST_SIZE,
1149        .halg.statesize = sizeof(struct mtk_sha_reqctx),
1150        .halg.base      = {
1151                .cra_name               = "hmac(sha512)",
1152                .cra_driver_name        = "mtk-hmac-sha512",
1153                .cra_priority           = 400,
1154                .cra_flags              = CRYPTO_ALG_ASYNC |
1155                                          CRYPTO_ALG_NEED_FALLBACK,
1156                .cra_blocksize          = SHA512_BLOCK_SIZE,
1157                .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1158                                        sizeof(struct mtk_sha_hmac_ctx),
1159                .cra_alignmask          = SHA_ALIGN_MSK,
1160                .cra_module             = THIS_MODULE,
1161                .cra_init               = mtk_sha_cra_sha512_init,
1162                .cra_exit               = mtk_sha_cra_exit,
1163        }
1164},
1165};
1166
1167static void mtk_sha_queue_task(unsigned long data)
1168{
1169        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1170
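        /* sha->id is a ring id (MTK_RING2/3); subtract to get the record index */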
1171        mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
1172}
1173
1174static void mtk_sha_done_task(unsigned long data)
1175{
1176        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1177        struct mtk_cryp *cryp = sha->cryp;
1178
1179        mtk_sha_unmap(cryp, sha);
1180        mtk_sha_complete(cryp, sha);
1181}
1182
1183static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
1184{
1185        struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
1186        struct mtk_cryp *cryp = sha->cryp;
1187        u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
1188
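        /* Write the status bits back to acknowledge and clear the interrupt */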
1189        mtk_sha_write(cryp, RDR_STAT(sha->id), val);
1190
1191        if (likely((SHA_FLAGS_BUSY & sha->flags))) {
1192                mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
1193                mtk_sha_write(cryp, RDR_THRESH(sha->id),
1194                              MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1195
1196                tasklet_schedule(&sha->done_task);
1197        } else {
1198                dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
1199        }
1200        return IRQ_HANDLED;
1201}
1202
 1203/*
 1204 * Two SHA records are used to get extra performance.
 1205 * This is similar to mtk_aes_record_init().
 1206 */
1207static int mtk_sha_record_init(struct mtk_cryp *cryp)
1208{
1209        struct mtk_sha_rec **sha = cryp->sha;
1210        int i, err = -ENOMEM;
1211
1212        for (i = 0; i < MTK_REC_NUM; i++) {
1213                sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
1214                if (!sha[i])
1215                        goto err_cleanup;
1216
1217                sha[i]->cryp = cryp;
1218
1219                spin_lock_init(&sha[i]->lock);
1220                crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
1221
1222                tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
1223                             (unsigned long)sha[i]);
1224                tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
1225                             (unsigned long)sha[i]);
1226        }
1227
1228        /* Link to ring2 and ring3 respectively */
1229        sha[0]->id = MTK_RING2;
1230        sha[1]->id = MTK_RING3;
1231
1232        cryp->rec = 1;
1233
1234        return 0;
1235
1236err_cleanup:
1237        for (; i--; )
1238                kfree(sha[i]);
1239        return err;
1240}
1241
1242static void mtk_sha_record_free(struct mtk_cryp *cryp)
1243{
1244        int i;
1245
1246        for (i = 0; i < MTK_REC_NUM; i++) {
1247                tasklet_kill(&cryp->sha[i]->done_task);
1248                tasklet_kill(&cryp->sha[i]->queue_task);
1249
1250                kfree(cryp->sha[i]);
1251        }
1252}
1253
1254static void mtk_sha_unregister_algs(void)
1255{
1256        int i;
1257
1258        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
1259                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
1260
1261        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
1262                crypto_unregister_ahash(&algs_sha384_sha512[i]);
1263}
1264
1265static int mtk_sha_register_algs(void)
1266{
1267        int err, i;
1268
1269        for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
1270                err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
1271                if (err)
1272                        goto err_sha_224_256_algs;
1273        }
1274
1275        for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
1276                err = crypto_register_ahash(&algs_sha384_sha512[i]);
1277                if (err)
1278                        goto err_sha_384_512_algs;
1279        }
1280
1281        return 0;
1282
1283err_sha_384_512_algs:
1284        for (; i--; )
1285                crypto_unregister_ahash(&algs_sha384_sha512[i]);
1286        i = ARRAY_SIZE(algs_sha1_sha224_sha256);
1287err_sha_224_256_algs:
1288        for (; i--; )
1289                crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
1290
1291        return err;
1292}
1293
1294int mtk_hash_alg_register(struct mtk_cryp *cryp)
1295{
1296        int err;
1297
1298        INIT_LIST_HEAD(&cryp->sha_list);
1299
1300        /* Initialize two hash records */
1301        err = mtk_sha_record_init(cryp);
1302        if (err)
1303                goto err_record;
1304
1305        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
1306                               0, "mtk-sha", cryp->sha[0]);
1307        if (err) {
1308                dev_err(cryp->dev, "unable to request sha irq0.\n");
1309                goto err_res;
1310        }
1311
1312        err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
1313                               0, "mtk-sha", cryp->sha[1]);
1314        if (err) {
1315                dev_err(cryp->dev, "unable to request sha irq1.\n");
1316                goto err_res;
1317        }
1318
1319        /* Enable ring2 and ring3 interrupt for hash */
1320        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
1321        mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
1322
1323        spin_lock(&mtk_sha.lock);
1324        list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
1325        spin_unlock(&mtk_sha.lock);
1326
1327        err = mtk_sha_register_algs();
1328        if (err)
1329                goto err_algs;
1330
1331        return 0;
1332
1333err_algs:
1334        spin_lock(&mtk_sha.lock);
1335        list_del(&cryp->sha_list);
1336        spin_unlock(&mtk_sha.lock);
1337err_res:
1338        mtk_sha_record_free(cryp);
1339err_record:
1340
1341        dev_err(cryp->dev, "mtk-sha initialization failed.\n");
1342        return err;
1343}
1344
1345void mtk_hash_alg_release(struct mtk_cryp *cryp)
1346{
1347        spin_lock(&mtk_sha.lock);
1348        list_del(&cryp->sha_list);
1349        spin_unlock(&mtk_sha.lock);
1350
1351        mtk_sha_unregister_algs();
1352        mtk_sha_record_free(cryp);
1353}
1354