linux/drivers/crypto/mediatek/mtk-aes.c
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE          512
#define AES_BUF_ORDER           2
#define AES_BUF_SIZE            ((PAGE_SIZE << AES_BUF_ORDER) \
                                & ~(AES_BLOCK_SIZE - 1))
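/*
 * Note: with the common 4 KiB PAGE_SIZE, AES_BUF_SIZE above works out to
 * 16 KiB, rounded down to a multiple of AES_BLOCK_SIZE. This is only an
 * illustration for readers; PAGE_SIZE may differ on other configurations.
 */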
#define AES_MAX_STATE_BUF_SIZE  SIZE_IN_WORDS(AES_KEYSIZE_256 + \
                                AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE         6

#define AES_CT_CTRL_HDR         cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0                cpu_to_le32(0x05000000)
#define AES_CMD1                cpu_to_le32(0x2d060000)
#define AES_CMD2                cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0            cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1            cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2            cpu_to_le32(0x25000010)
#define AES_GCM_CMD3            cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4            cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5            cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6            cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT       cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN        cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT         cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN          cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)         cpu_to_le32((x) << 8)
#define AES_TFM_128BITS         cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS         cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS         cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST    cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH           cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB             cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC             cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT        cpu_to_le32(0x2 << 0)   /* init counter to 1 */
#define AES_TFM_CTR_LOAD        cpu_to_le32(0x6 << 0)   /* load/reuse counter */
#define AES_TFM_3IV             cpu_to_le32(0x7 << 5)   /* using IV 0-2 */
#define AES_TFM_FULL_IV         cpu_to_le32(0xf << 5)   /* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE     cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH        cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK    GENMASK(2, 0)
#define AES_FLAGS_ECB           BIT(0)
#define AES_FLAGS_CBC           BIT(1)
#define AES_FLAGS_CTR           BIT(2)
#define AES_FLAGS_GCM           BIT(3)
#define AES_FLAGS_ENCRYPT       BIT(4)
#define AES_FLAGS_BUSY          BIT(5)

#define AES_AUTH_TAG_ERR        cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:        command token, hardware instruction
 * @tfm:        transform state of cipher algorithm.
 * @state:      contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information for:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_info {
        __le32 cmd[AES_MAX_CT_SIZE];
        __le32 tfm[2];
        __le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
        u64 mode;
};

struct mtk_aes_base_ctx {
        struct mtk_cryp *cryp;
        u32 keylen;
        __le32 keymode;

        mtk_aes_fn start;

        struct mtk_aes_info info;
        dma_addr_t ct_dma;
        dma_addr_t tfm_dma;

        __le32 ct_hdr;
        u32 ct_size;
};

struct mtk_aes_ctx {
        struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
        struct mtk_aes_base_ctx base;

        u32     iv[AES_BLOCK_SIZE / sizeof(u32)];
        size_t offset;
        struct scatterlist src[2];
        struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
        struct mtk_aes_base_ctx base;

        u32 authsize;
        size_t textlen;

        struct crypto_skcipher *ctr;
};

struct mtk_aes_gcm_setkey_result {
        int err;
        struct completion completion;
};

struct mtk_aes_drv {
        struct list_head dev_list;
        /* Device list lock */
        spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
        .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
        return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
                                 u32 offset, u32 value)
{
        writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
        struct mtk_cryp *cryp = NULL;
        struct mtk_cryp *tmp;

        spin_lock_bh(&mtk_aes.lock);
        if (!ctx->cryp) {
                list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
                        cryp = tmp;
                        break;
                }
                ctx->cryp = cryp;
        } else {
                cryp = ctx->cryp;
        }
        spin_unlock_bh(&mtk_aes.lock);

        return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
        len &= AES_BLOCK_SIZE - 1;
        return len ? AES_BLOCK_SIZE - len : 0;
}

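/*
 * Check whether the scatterlist is safe for direct DMA: the total length
 * must be a multiple of AES_BLOCK_SIZE, every entry must start word-aligned,
 * and all but the last used entry must span whole blocks. On success the
 * last entry is trimmed to @len and the cut-off remainder is recorded so
 * mtk_aes_restore_sg() can undo the trim after the transfer.
 */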
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
                                  struct mtk_aes_dma *dma)
{
        int nents;

        if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
                return false;

        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return false;

                if (len <= sg->length) {
                        if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
                                return false;

                        dma->nents = nents + 1;
                        dma->remainder = sg->length - len;
                        sg->length = len;
                        return true;
                }

                if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
                        return false;

                len -= sg->length;
        }

        return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
                                    const struct mtk_aes_reqctx *rctx)
{
        /* Clear all but persistent flags and set request flags. */
        aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
        struct scatterlist *sg = dma->sg;
        int nents = dma->nents;

        if (!dma->remainder)
                return;

        while (--nents > 0 && sg)
                sg = sg_next(sg);

        if (!sg)
                return;

        sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
        int i;

        for (i = 0; i < SIZE_IN_WORDS(size); i++)
                dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
        int i;

        for (i = 0; i < SIZE_IN_WORDS(size); i++)
                dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
                                   struct mtk_aes_rec *aes,
                                   int err)
{
        aes->flags &= ~AES_FLAGS_BUSY;
        aes->areq->complete(aes->areq, err);
        /* Handle new request */
        tasklet_schedule(&aes->queue_task);
        return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_ring *ring = cryp->ring[aes->id];
        struct mtk_desc *cmd = NULL, *res = NULL;
        struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
        u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
        int nents;

        /* Write command descriptors */
        for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
                cmd = ring->cmd_next;
                cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
                cmd->buf = cpu_to_le32(sg_dma_address(ssg));

                if (nents == 0) {
                        cmd->hdr |= MTK_DESC_FIRST |
                                    MTK_DESC_CT_LEN(aes->ctx->ct_size);
                        cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
                        cmd->ct_hdr = aes->ctx->ct_hdr;
                        cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
                }

                /* Shift ring buffer and check boundary */
                if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
                        ring->cmd_next = ring->cmd_base;
        }
        cmd->hdr |= MTK_DESC_LAST;

        /* Prepare result descriptors */
        for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
                res = ring->res_next;
                res->hdr = MTK_DESC_BUF_LEN(dsg->length);
                res->buf = cpu_to_le32(sg_dma_address(dsg));

                if (nents == 0)
                        res->hdr |= MTK_DESC_FIRST;

                /* Shift ring buffer and check boundary */
                if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
                        ring->res_next = ring->res_base;
        }
        res->hdr |= MTK_DESC_LAST;

        /* Pointer to current result descriptor */
        ring->res_prev = res;

        /* Prepare enough space for authenticated tag */
        if (aes->flags & AES_FLAGS_GCM)
                res->hdr += AES_BLOCK_SIZE;

        /*
         * Make sure that all changes to the DMA ring are done before we
         * start engine.
         */
        wmb();
        /* Start DMA transfer */
        mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
        mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

        return -EINPROGRESS;
}

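/*
 * Undo the DMA mappings set up by mtk_aes_map(). If the bounce buffer was
 * used as the destination, copy the processed data back into the caller's
 * real destination scatterlist.
 */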
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;

        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
                         DMA_TO_DEVICE);

        if (aes->src.sg == aes->dst.sg) {
                dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                             DMA_BIDIRECTIONAL);

                if (aes->src.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->src);
        } else {
                dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
                             DMA_FROM_DEVICE);

                if (aes->dst.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->dst);

                dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                             DMA_TO_DEVICE);

                if (aes->src.sg != &aes->aligned_sg)
                        mtk_aes_restore_sg(&aes->src);
        }

        if (aes->dst.sg == &aes->aligned_sg)
                sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
                                    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_info *info = &ctx->info;

        ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
                                     DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
                goto exit;

        ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

        if (aes->src.sg == aes->dst.sg) {
                aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
                                             aes->src.nents,
                                             DMA_BIDIRECTIONAL);
                aes->dst.sg_len = aes->src.sg_len;
                if (unlikely(!aes->src.sg_len))
                        goto sg_map_err;
        } else {
                aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
                                             aes->src.nents, DMA_TO_DEVICE);
                if (unlikely(!aes->src.sg_len))
                        goto sg_map_err;

                aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
                                             aes->dst.nents, DMA_FROM_DEVICE);
                if (unlikely(!aes->dst.sg_len)) {
                        dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
                                     DMA_TO_DEVICE);
                        goto sg_map_err;
                }
        }

        return mtk_aes_xmit(cryp, aes);

sg_map_err:
        dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
        return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                              size_t len)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_info *info = &ctx->info;
        u32 cnt = 0;

        ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
        info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
        info->cmd[cnt++] = AES_CMD1;

        info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
        if (aes->flags & AES_FLAGS_ENCRYPT)
                info->tfm[0] |= AES_TFM_BASIC_OUT;
        else
                info->tfm[0] |= AES_TFM_BASIC_IN;

        switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
        case AES_FLAGS_CBC:
                info->tfm[1] = AES_TFM_CBC;
                break;
        case AES_FLAGS_ECB:
                info->tfm[1] = AES_TFM_ECB;
                goto ecb;
        case AES_FLAGS_CTR:
                info->tfm[1] = AES_TFM_CTR_LOAD;
                goto ctr;

        default:
                /* Should not happen... */
                return;
        }

        mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
                               AES_BLOCK_SIZE);
ctr:
        info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
        info->tfm[1] |= AES_TFM_FULL_IV;
        info->cmd[cnt++] = AES_CMD2;
ecb:
        ctx->ct_size = cnt;
}

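/*
 * Prepare the source/destination scatterlists for DMA. If either side is
 * not block- and word-aligned, the data is staged through the pre-allocated
 * bounce buffer (aes->buf) instead, padded up to a whole AES block.
 */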
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                       struct scatterlist *src, struct scatterlist *dst,
                       size_t len)
{
        size_t padlen = 0;
        bool src_aligned, dst_aligned;

        aes->total = len;
        aes->src.sg = src;
        aes->dst.sg = dst;
        aes->real_dst = dst;

        src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
        if (src == dst)
                dst_aligned = src_aligned;
        else
                dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

        if (!src_aligned || !dst_aligned) {
                padlen = mtk_aes_padlen(len);

                if (len + padlen > AES_BUF_SIZE)
                        return mtk_aes_complete(cryp, aes, -ENOMEM);

                if (!src_aligned) {
                        sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
                        aes->src.sg = &aes->aligned_sg;
                        aes->src.nents = 1;
                        aes->src.remainder = 0;
                }

                if (!dst_aligned) {
                        aes->dst.sg = &aes->aligned_sg;
                        aes->dst.nents = 1;
                        aes->dst.remainder = 0;
                }

                sg_init_table(&aes->aligned_sg, 1);
                sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
        }

        mtk_aes_info_init(cryp, aes, len + padlen);

        return mtk_aes_map(cryp, aes);
}

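/*
 * Enqueue a new request (if any) and, when the record is idle, dequeue the
 * next request from the queue, mark the record busy and hand the request to
 * the context's start() callback.
 */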
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
                                struct crypto_async_request *new_areq)
{
        struct mtk_aes_rec *aes = cryp->aes[id];
        struct crypto_async_request *areq, *backlog;
        struct mtk_aes_base_ctx *ctx;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&aes->lock, flags);
        if (new_areq)
                ret = crypto_enqueue_request(&aes->queue, new_areq);
        if (aes->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&aes->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&aes->queue);
        areq = crypto_dequeue_request(&aes->queue);
        if (areq)
                aes->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&aes->lock, flags);

        if (!areq)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(areq->tfm);

        aes->areq = areq;
        aes->ctx = ctx;

        return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
                                     struct mtk_aes_rec *aes)
{
        return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

        mtk_aes_set_mode(aes, rctx);
        aes->resume = mtk_aes_transfer_complete;

        return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
        return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

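/*
 * CTR requests are processed in chunks: each completed DMA transfer resumes
 * here, which advances the offset, rewrites the IV words in the transform
 * state and submits the next chunk until the whole request is done.
 */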
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
        struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
        struct scatterlist *src, *dst;
        u32 start, end, ctr, blocks;
        size_t datalen;
        bool fragmented = false;

        /* Check for transfer completion. */
        cctx->offset += aes->total;
        if (cctx->offset >= req->nbytes)
                return mtk_aes_transfer_complete(cryp, aes);

        /* Compute data length. */
        datalen = req->nbytes - cctx->offset;
        blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
        ctr = be32_to_cpu(cctx->iv[3]);

        /* Check 32bit counter overflow. */
        start = ctr;
        end = start + blocks - 1;
        if (end < start) {
                ctr |= 0xffffffff;
                datalen = AES_BLOCK_SIZE * -start;
                fragmented = true;
        }
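        /*
         * In the fragmented case only the blocks up to the 32-bit counter
         * wrap are submitted now; the remainder is handled on the next
         * resume with the manually incremented IV below.
         */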

        /* Jump to offset. */
        src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
        dst = ((req->src == req->dst) ? src :
               scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

        /* Write IVs into transform state buffer. */
        mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
                               AES_BLOCK_SIZE);

        if (unlikely(fragmented)) {
                /*
                 * Increment the counter manually to cope with the hardware
                 * counter overflow.
                 */
                cctx->iv[3] = cpu_to_be32(ctr);
                crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
        }

        return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
        struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

        mtk_aes_set_mode(aes, rctx);

        memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
        cctx->offset = 0;
        aes->total = 0;
        aes->resume = mtk_aes_ctr_transfer;

        return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check the key length and write the AES key into the transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
                          const u8 *key, u32 keylen)
{
        struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->keymode = AES_TFM_128BITS;
                break;
        case AES_KEYSIZE_192:
                ctx->keymode = AES_TFM_192BITS;
                break;
        case AES_KEYSIZE_256:
                ctx->keymode = AES_TFM_256BITS;
                break;

        default:
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        ctx->keylen = SIZE_IN_WORDS(keylen);
        mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

        return 0;
}

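/*
 * For these block cipher requests, the record id passed to
 * mtk_aes_handle_queue() selects the ring: encryption is dispatched to
 * record/ring 0 and decryption to record/ring 1, so both directions can be
 * processed in parallel.
 */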
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
        struct mtk_aes_base_ctx *ctx;
        struct mtk_aes_reqctx *rctx;

        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx = ablkcipher_request_ctx(req);
        rctx->mode = mode;

        return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
                                    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
        struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mtk_cryp *cryp = NULL;

        cryp = mtk_aes_find_dev(&ctx->base);
        if (!cryp) {
                pr_err("can't find crypto device\n");
                return -ENODEV;
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
        ctx->base.start = mtk_aes_start;
        return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
        struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mtk_cryp *cryp = NULL;

        cryp = mtk_aes_find_dev(&ctx->base);
        if (!cryp) {
                pr_err("can't find crypto device\n");
                return -ENODEV;
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
        ctx->base.start = mtk_aes_ctr_start;
        return 0;
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-mtk",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC,
        .cra_init               = mtk_aes_cra_init,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct mtk_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = mtk_aes_setkey,
                .encrypt        = mtk_aes_cbc_encrypt,
                .decrypt        = mtk_aes_cbc_decrypt,
                .ivsize         = AES_BLOCK_SIZE,
        }
},
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-mtk",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC,
        .cra_init               = mtk_aes_cra_init,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct mtk_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = mtk_aes_setkey,
                .encrypt        = mtk_aes_ecb_encrypt,
                .decrypt        = mtk_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-mtk",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC,
        .cra_init               = mtk_aes_ctr_cra_init,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct mtk_aes_ctr_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = mtk_aes_setkey,
                .encrypt        = mtk_aes_ctr_encrypt,
                .decrypt        = mtk_aes_ctr_decrypt,
        }
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
        return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
                                  struct mtk_aes_rec *aes)
{
        u32 status = cryp->ring[aes->id]->res_prev->ct;

        return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
                                -EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
                                  struct mtk_aes_rec *aes,
                                  size_t len)
{
        struct aead_request *req = aead_request_cast(aes->areq);
        struct mtk_aes_base_ctx *ctx = aes->ctx;
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
        struct mtk_aes_info *info = &ctx->info;
        u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
        u32 cnt = 0;

        ctx->ct_hdr = AES_CT_CTRL_HDR | len;

        info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
        info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
        info->cmd[cnt++] = AES_GCM_CMD2;
        info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

        if (aes->flags & AES_FLAGS_ENCRYPT) {
                info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
                info->tfm[0] = AES_TFM_GCM_OUT;
        } else {
                info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
                info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
                info->tfm[0] = AES_TFM_GCM_IN;
        }
        ctx->ct_size = cnt;

        info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
                        ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
                        ctx->keymode;
        info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
                       AES_TFM_ENC_HASH;

        mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
                               AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
                           struct scatterlist *src, struct scatterlist *dst,
                           size_t len)
{
        bool src_aligned, dst_aligned;

        aes->src.sg = src;
        aes->dst.sg = dst;
        aes->real_dst = dst;

        src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
        if (src == dst)
                dst_aligned = src_aligned;
        else
                dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

        if (!src_aligned || !dst_aligned) {
                if (aes->total > AES_BUF_SIZE)
                        return mtk_aes_complete(cryp, aes, -ENOMEM);

                if (!src_aligned) {
                        sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
                        aes->src.sg = &aes->aligned_sg;
                        aes->src.nents = 1;
                        aes->src.remainder = 0;
                }

                if (!dst_aligned) {
                        aes->dst.sg = &aes->aligned_sg;
                        aes->dst.nents = 1;
                        aes->dst.remainder = 0;
                }

                sg_init_table(&aes->aligned_sg, 1);
                sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
        }

        mtk_aes_gcm_info_init(cryp, aes, len);

        return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
        struct aead_request *req = aead_request_cast(aes->areq);
        struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
        u32 len = req->assoclen + req->cryptlen;

        mtk_aes_set_mode(aes, rctx);

        if (aes->flags & AES_FLAGS_ENCRYPT) {
                u32 tag[4];

                aes->resume = mtk_aes_transfer_complete;
                /* Compute total process length. */
                aes->total = len + gctx->authsize;
                /* Compute text length. */
                gctx->textlen = req->cryptlen;
                /* Hardware will append authenticated tag to output buffer */
                scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
        } else {
                aes->resume = mtk_aes_gcm_tag_verify;
                aes->total = len;
                gctx->textlen = req->cryptlen - gctx->authsize;
        }

        return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

        rctx->mode = AES_FLAGS_GCM | mode;

        return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
                                    &req->base);
}

static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
{
        struct mtk_aes_gcm_setkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;

        result->err = err;
        complete(&result->completion);
}

/*
 * Because of a hardware limitation, we need to pre-calculate the hash key H
 * (the encryption of a 128-bit all-zero block) for the GHASH operation. The
 * result of that encryption needs to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
                              u32 keylen)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
        struct crypto_skcipher *ctr = gctx->ctr;
        struct {
                u32 hash[4];
                u8 iv[8];

                struct mtk_aes_gcm_setkey_result result;

                struct scatterlist sg[1];
                struct skcipher_request req;
        } *data;
        int err;

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->keymode = AES_TFM_128BITS;
                break;
        case AES_KEYSIZE_192:
                ctx->keymode = AES_TFM_192BITS;
                break;
        case AES_KEYSIZE_256:
                ctx->keymode = AES_TFM_256BITS;
                break;

        default:
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        ctx->keylen = SIZE_IN_WORDS(keylen);

        /* Same as crypto_gcm_setkey() from crypto/gcm.c */
        crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ctr, key, keylen);
        crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
                              CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
                       GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        init_completion(&data->result.completion);
        sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
        skcipher_request_set_tfm(&data->req, ctr);
        skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                      CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      mtk_gcm_setkey_done, &data->result);
        skcipher_request_set_crypt(&data->req, data->sg, data->sg,
                                   AES_BLOCK_SIZE, data->iv);

        err = crypto_skcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                err = wait_for_completion_interruptible(
                        &data->result.completion);
                if (!err)
                        err = data->result.err;
        }
        if (err)
                goto out;

        /* Write key into state buffer */
        mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
        /* Write key(H) into state buffer */
        mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
                               AES_BLOCK_SIZE);
out:
        kzfree(data);
        return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
                                   u32 authsize)
{
        struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
        struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

        /* Same as crypto_gcm_authsize() from crypto/gcm.c */
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        gctx->authsize = authsize;
        return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
        return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
        return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
        struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
        struct mtk_cryp *cryp = NULL;

        cryp = mtk_aes_find_dev(&ctx->base);
        if (!cryp) {
                pr_err("can't find crypto device\n");
                return -ENODEV;
        }

        ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
                                         CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->ctr)) {
                pr_err("Error allocating ctr(aes)\n");
                return PTR_ERR(ctx->ctr);
        }

        crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
        ctx->base.start = mtk_aes_gcm_start;
        return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
        struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

        crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
        .setkey         = mtk_aes_gcm_setkey,
        .setauthsize    = mtk_aes_gcm_setauthsize,
        .encrypt        = mtk_aes_gcm_encrypt,
        .decrypt        = mtk_aes_gcm_decrypt,
        .init           = mtk_aes_gcm_init,
        .exit           = mtk_aes_gcm_exit,
        .ivsize         = 12,
        .maxauthsize    = AES_BLOCK_SIZE,

        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "gcm-aes-mtk",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct mtk_aes_gcm_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
};

static void mtk_aes_queue_task(unsigned long data)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

        mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
        struct mtk_cryp *cryp = aes->cryp;

        mtk_aes_unmap(cryp, aes);
        aes->resume(cryp, aes);
}

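/*
 * Ring interrupt handler: acknowledge the interrupt, reset the processed
 * descriptor count and re-arm the threshold, then defer the completion work
 * to the done tasklet.
 */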
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
        struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
        struct mtk_cryp *cryp = aes->cryp;
        u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

        mtk_aes_write(cryp, RDR_STAT(aes->id), val);

        if (likely(AES_FLAGS_BUSY & aes->flags)) {
                mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
                mtk_aes_write(cryp, RDR_THRESH(aes->id),
                              MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

                tasklet_schedule(&aes->done_task);
        } else {
                dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
        }
        return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound/inbound data in parallel, which can improve performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
        struct mtk_aes_rec **aes = cryp->aes;
        int i, err = -ENOMEM;

        for (i = 0; i < MTK_REC_NUM; i++) {
                aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
                if (!aes[i])
                        goto err_cleanup;

                aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
                                                AES_BUF_ORDER);
                if (!aes[i]->buf)
                        goto err_cleanup;

                aes[i]->cryp = cryp;

                spin_lock_init(&aes[i]->lock);
                crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

                tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
                             (unsigned long)aes[i]);
                tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
                             (unsigned long)aes[i]);
        }

        /* Link to ring0 and ring1 respectively */
        aes[0]->id = MTK_RING0;
        aes[1]->id = MTK_RING1;

        return 0;

err_cleanup:
        for (; i--; ) {
                free_page((unsigned long)aes[i]->buf);
                kfree(aes[i]);
        }

        return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
        int i;

        for (i = 0; i < MTK_REC_NUM; i++) {
                tasklet_kill(&cryp->aes[i]->done_task);
                tasklet_kill(&cryp->aes[i]->queue_task);

                free_page((unsigned long)cryp->aes[i]->buf);
                kfree(cryp->aes[i]);
        }
}

static void mtk_aes_unregister_algs(void)
{
        int i;

        crypto_unregister_aead(&aes_gcm_alg);

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
        int err, i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        err = crypto_register_aead(&aes_gcm_alg);
        if (err)
                goto err_aes_algs;

        return 0;

err_aes_algs:
        for (; i--; )
                crypto_unregister_alg(&aes_algs[i]);

        return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
        int ret;

        INIT_LIST_HEAD(&cryp->aes_list);

        /* Initialize two cipher records */
        ret = mtk_aes_record_init(cryp);
        if (ret)
                goto err_record;

        ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
                               0, "mtk-aes", cryp->aes[0]);
        if (ret) {
                dev_err(cryp->dev, "unable to request AES irq.\n");
                goto err_res;
        }

        ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
                               0, "mtk-aes", cryp->aes[1]);
        if (ret) {
                dev_err(cryp->dev, "unable to request AES irq.\n");
                goto err_res;
        }

        /* Enable ring0 and ring1 interrupt */
        mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
        mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

        spin_lock(&mtk_aes.lock);
        list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
        spin_unlock(&mtk_aes.lock);

        ret = mtk_aes_register_algs();
        if (ret)
                goto err_algs;

        return 0;

err_algs:
        spin_lock(&mtk_aes.lock);
        list_del(&cryp->aes_list);
        spin_unlock(&mtk_aes.lock);
err_res:
        mtk_aes_record_free(cryp);
err_record:
        dev_err(cryp->dev, "mtk-aes initialization failed.\n");
        return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
        spin_lock(&mtk_aes.lock);
        list_del(&cryp->aes_list);
        spin_unlock(&mtk_aes.lock);

        mtk_aes_unregister_algs();
        mtk_aes_record_free(cryp);
}
1305