linux/drivers/crypto/marvell/cesa/cipher.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

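/*
 * Per-transform (tfm) contexts: each embeds the common mv_cesa_ctx and
 * keeps a copy of the raw key material so it can be dropped into the
 * operation descriptor when a request is built.
 */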
struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

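/*
 * Initialize the DMA iterators: the base iterator splits the request
 * into SRAM-sized operations, while the src/dst iterators walk the
 * scatterlists in the matching DMA direction.
 */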
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
                               struct skcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_cleanup(req);
}

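/*
 * Standard (CPU-driven) mode: copy the operation descriptor and the
 * next chunk of input data into the engine SRAM, then kick the
 * accelerator. The full descriptor (including the cipher context) is
 * only written for the first chunk; later chunks rewrite just the
 * descriptor header.
 */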
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t  len = min_t(size_t, req->cryptlen - sreq->offset,
                            CESA_SA_SRAM_PAYLOAD_SIZE);

        mv_cesa_adjust_op(engine, &sreq->op);
        if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

        len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
                                      CESA_SA_DATA_SRAM_OFFSET, len,
                                      sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
        if (!sreq->skip_ctx) {
                if (engine->pool)
                        memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
                else
                        memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else if (engine->pool)
                memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
        else
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

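/*
 * Standard-mode completion of one chunk: copy the result back from the
 * engine SRAM into the destination scatterlist. Returns -EINPROGRESS
 * until the whole request has been processed, so that the core steps
 * the engine again.
 */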
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
                                        u32 status)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len;

        len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
                                        CESA_SA_DATA_SRAM_OFFSET, sreq->size,
                                        sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->cryptlen)
                return -EINPROGRESS;

        return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
                                    u32 status)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_req *basereq = &creq->base;

        if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
                return mv_cesa_skcipher_std_process(skreq, status);

        return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;

        sreq->size = 0;
        sreq->offset = 0;
}

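/*
 * Bind the request to the engine it will run on and do the
 * mode-specific preparation (TDMA descriptor fixups or standard-mode
 * offset reset).
 */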
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
                                            struct mv_cesa_engine *engine)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_prepare(skreq);
        else
                mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);

        mv_cesa_skcipher_cleanup(skreq);
}

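/*
 * Completion handler: drop this request's contribution to the engine
 * load and copy the output IV back into the request. In DMA mode the
 * IV sits in the last operation descriptor of the chain; in standard
 * mode it is read back from the engine SRAM.
 */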
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int ivsize;

        atomic_sub(skreq->cryptlen, &engine->load);
        ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
                struct mv_cesa_req *basereq;

                basereq = &creq->base;
                memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
                       ivsize);
        } else if (engine->pool)
                memcpy(skreq->iv,
                       engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                       ivsize);
        else
                memcpy_fromio(skreq->iv,
                              engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                              ivsize);
}

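/* Request operations wired into the common CESA request queue. */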
static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
        .step = mv_cesa_skcipher_step,
        .process = mv_cesa_skcipher_process,
        .cleanup = mv_cesa_skcipher_req_cleanup,
        .complete = mv_cesa_skcipher_complete,
};

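/* Wipe the key material held in the transform context on destruction. */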
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        void *ctx = crypto_tfm_ctx(tfm);

        memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->ops = &mv_cesa_skcipher_req_ops;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct mv_cesa_skcipher_req));

        return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = aes_expandkey(&ctx->aes, key, len);
        if (ret)
                return ret;

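        /*
         * The engine derives the AES decryption schedule itself; what
         * it consumes in key_dec is the last key-length's worth of the
         * expanded encryption schedule. aes_expandkey() already put the
         * final round key in key_dec[0-3]; copy the preceding words of
         * key_enc in behind it (a no-op for AES-128).
         */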
        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = verify_skcipher_des3_key(cipher, key);
        if (err)
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}

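/*
 * Build the TDMA descriptor chain for a request: map the scatterlists,
 * then, for each SRAM-sized chunk, queue the operation descriptor, the
 * input transfers, a dummy descriptor that launches the engine, and the
 * output transfers. A final result descriptor saves the IV, and the
 * chain is terminated with CESA_TDMA_END_OF_REQ.
 */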
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
                                         const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_skcipher_dma_iter iter;
        bool skip_ctx = false;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_skcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
                                        flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_skcipher_req_iter_next_op(&iter));

        /* Add output data for IV */
        ret = mv_cesa_dma_add_result_op(&basereq->chain,
                                        CESA_SA_CFG_SRAM_OFFSET,
                                        CESA_SA_DATA_SRAM_OFFSET,
                                        CESA_TDMA_SRC_IN_SRAM, flags);

        if (ret)
                goto err_free_tdma;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
                              const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_req *basereq = &creq->base;

        sreq->op = *op_templ;
        sreq->skip_ctx = false;
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        return 0;
}

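/*
 * Common request initialization: reject lengths that are not a multiple
 * of the cipher block size, count the scatterlist entries, force a
 * crypt-only operation, and pick the DMA or standard backend depending
 * on whether the engine has a TDMA unit.
 */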
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
                                     struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int blksize = crypto_skcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->cryptlen, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }
        creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (creq->dst_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of dst SG");
                return creq->dst_nents;
        }

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_skcipher_std_req_init(req, tmpl);

        return ret;
}

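/*
 * Initialize the request, select an engine according to the request
 * length, and hand the request over to the CESA queue. If queueing
 * fails synchronously, the resources acquired during initialization
 * are released here instead of in the completion path.
 */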
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
                                      struct mv_cesa_op_ctx *tmpl)
{
        int ret;
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine;

        ret = mv_cesa_skcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        engine = mv_cesa_select_engine(req->cryptlen);
        mv_cesa_skcipher_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_skcipher_cleanup(req);

        return ret;
}

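/*
 * Algorithm entry points. Each mv_cesa_*_op() helper completes the
 * operation template for its cipher (algorithm selector bits plus the
 * key, and the IV for the CBC variants) before queueing the request.
 */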
static int mv_cesa_des_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_ecb_des_encrypt,
        .decrypt = mv_cesa_ecb_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des)",
                .cra_driver_name = "mv-ecb-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_cbc_des_encrypt,
        .decrypt = mv_cesa_cbc_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des)",
                .cra_driver_name = "mv-cbc-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_des3_op(struct skcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "ecb(des3_ede)",
                .cra_driver_name = "mv-ecb-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des3_ede)",
                .cra_driver_name = "mv-cbc-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

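/*
 * AES needs a little more care: pick the encryption or decryption key
 * schedule based on the direction bit, copy it little-endian into the
 * template, and encode the key length in the config word.
 */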
static int mv_cesa_aes_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_ecb_aes_encrypt,
        .decrypt = mv_cesa_ecb_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .base = {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "mv-ecb-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

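/*
 * Exposed to users as the "cbc(aes)" skcipher. A minimal sketch of how
 * this driver is reached through the generic kernel crypto API (the
 * calls below are standard API, not part of this file):
 *
 *      struct crypto_skcipher *tfm;
 *
 *      tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      err = crypto_skcipher_setkey(tfm, key, AES_MAX_KEY_SIZE);
 *      ...
 *      crypto_free_skcipher(tfm);
 */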
struct skcipher_alg mv_cesa_cbc_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_cbc_aes_encrypt,
        .decrypt = mv_cesa_cbc_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "mv-cbc-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};