/* linux/drivers/crypto/marvell/cesa.c */
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platform: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
  16
  17#include <linux/delay.h>
  18#include <linux/genalloc.h>
  19#include <linux/interrupt.h>
  20#include <linux/io.h>
  21#include <linux/kthread.h>
  22#include <linux/mbus.h>
  23#include <linux/platform_device.h>
  24#include <linux/scatterlist.h>
  25#include <linux/slab.h>
  26#include <linux/module.h>
  27#include <linux/clk.h>
  28#include <linux/of.h>
  29#include <linux/of_platform.h>
  30#include <linux/of_irq.h>
  31
  32#include "cesa.h"
  33
  34/* Limit of the crypto queue before reaching the backlog */
  35#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
  36
  37static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
  38module_param_named(allhwsupport, allhwsupport, int, 0444);
  39MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");
  40
  41struct mv_cesa_dev *cesa_dev;
  42
  43struct crypto_async_request *
  44mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
  45                           struct crypto_async_request **backlog)
  46{
  47        struct crypto_async_request *req;
  48
  49        *backlog = crypto_get_backlog(&engine->queue);
  50        req = crypto_dequeue_request(&engine->queue);
  51
  52        if (!req)
  53                return NULL;
  54
  55        return req;
  56}
  57
  58static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
  59{
  60        struct crypto_async_request *req = NULL, *backlog = NULL;
  61        struct mv_cesa_ctx *ctx;
  62
  63
  64        spin_lock_bh(&engine->lock);
  65        if (!engine->req) {
  66                req = mv_cesa_dequeue_req_locked(engine, &backlog);
  67                engine->req = req;
  68        }
  69        spin_unlock_bh(&engine->lock);
  70
  71        if (!req)
  72                return;
  73
  74        if (backlog)
  75                backlog->complete(backlog, -EINPROGRESS);
  76
  77        ctx = crypto_tfm_ctx(req->tfm);
  78        ctx->ops->step(req);
  79
  80        return;
  81}
  82
  83static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
  84{
  85        struct crypto_async_request *req;
  86        struct mv_cesa_ctx *ctx;
  87        int res;
  88
  89        req = engine->req;
  90        ctx = crypto_tfm_ctx(req->tfm);
  91        res = ctx->ops->process(req, status);
  92
  93        if (res == 0) {
  94                ctx->ops->complete(req);
  95                mv_cesa_engine_enqueue_complete_request(engine, req);
  96        } else if (res == -EINPROGRESS) {
  97                ctx->ops->step(req);
  98        }
  99
 100        return res;
 101}
 102
 103static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
 104{
 105        if (engine->chain.first && engine->chain.last)
 106                return mv_cesa_tdma_process(engine, status);
 107
 108        return mv_cesa_std_process(engine, status);
 109}
 110
/*
 * Release the resources owned by @req, then invoke its completion callback
 * with @res. The callback is deliberately run with bottom halves disabled;
 * presumably completion handlers expect softirq-like context — the
 * disable/enable pair around complete() must not be reordered.
 */
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
 120
/*
 * Interrupt handler. Registered through devm_request_threaded_irq() with a
 * NULL primary handler (see mv_cesa_probe()), so it runs in thread context,
 * which is why spin_lock_bh() is safe here. Loops until no enabled event is
 * pending: acks status bits, runs the request-specific processing, detaches
 * finished requests, rearms the engine and drains the completion queue.
 */
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		/* No enabled event pending: we are done. */
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		/*
		 * Detach the request from the engine unless it is still in
		 * flight (-EINPROGRESS).
		 * NOTE(review): engine->req is assumed non-NULL whenever an
		 * enabled event fires — verify against the step/ack paths.
		 */
		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		/* Report real errors (anything but 0 / -EINPROGRESS). */
		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
 176
 177int mv_cesa_queue_req(struct crypto_async_request *req,
 178                      struct mv_cesa_req *creq)
 179{
 180        int ret;
 181        struct mv_cesa_engine *engine = creq->engine;
 182
 183        spin_lock_bh(&engine->lock);
 184        ret = crypto_enqueue_request(&engine->queue, req);
 185        if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
 186            (ret == -EINPROGRESS ||
 187            (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
 188                mv_cesa_tdma_chain(engine, creq);
 189        spin_unlock_bh(&engine->lock);
 190
 191        if (ret != -EINPROGRESS)
 192                return ret;
 193
 194        mv_cesa_rearm_engine(engine);
 195
 196        return -EINPROGRESS;
 197}
 198
 199static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
 200{
 201        int ret;
 202        int i, j;
 203
 204        for (i = 0; i < cesa->caps->ncipher_algs; i++) {
 205                ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
 206                if (ret)
 207                        goto err_unregister_crypto;
 208        }
 209
 210        for (i = 0; i < cesa->caps->nahash_algs; i++) {
 211                ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
 212                if (ret)
 213                        goto err_unregister_ahash;
 214        }
 215
 216        return 0;
 217
 218err_unregister_ahash:
 219        for (j = 0; j < i; j++)
 220                crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
 221        i = cesa->caps->ncipher_algs;
 222
 223err_unregister_crypto:
 224        for (j = 0; j < i; j++)
 225                crypto_unregister_alg(cesa->caps->cipher_algs[j]);
 226
 227        return ret;
 228}
 229
 230static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
 231{
 232        int i;
 233
 234        for (i = 0; i < cesa->caps->nahash_algs; i++)
 235                crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
 236
 237        for (i = 0; i < cesa->caps->ncipher_algs; i++)
 238                crypto_unregister_alg(cesa->caps->cipher_algs[i]);
 239}
 240
/* Cipher algorithms exposed on Orion/Kirkwood/Dove class hardware. */
static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Hash algorithms for Orion-class hardware (no SHA-256 entries). */
static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

/* Cipher list for Armada 370/XP/375/38x (same set as Orion). */
static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Armada hash list: adds SHA-256 / HMAC-SHA256 on top of the Orion set. */
static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};
 274
/* Orion: single engine, no TDMA. */
static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

/* Kirkwood/Dove: like Orion but with a TDMA engine. */
static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

/* Armada 370: single engine, TDMA, SHA-256 capable. */
static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

/* Armada XP/375/38x: same algorithms as 370 but with two engines. */
static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

/* DT compatible strings mapped to the matching capability set. */
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
 322
 323static void
 324mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
 325                          const struct mbus_dram_target_info *dram)
 326{
 327        void __iomem *iobase = engine->regs;
 328        int i;
 329
 330        for (i = 0; i < 4; i++) {
 331                writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
 332                writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
 333        }
 334
 335        for (i = 0; i < dram->num_cs; i++) {
 336                const struct mbus_dram_window *cs = dram->cs + i;
 337
 338                writel(((cs->size - 1) & 0xffff0000) |
 339                       (cs->mbus_attr << 8) |
 340                       (dram->mbus_dram_target_id << 4) | 1,
 341                       iobase + CESA_TDMA_WINDOW_CTRL(i));
 342                writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
 343        }
 344}
 345
/*
 * Allocate the DMA pools used by the TDMA engine (descriptors, operation
 * contexts, hash caches, padding and IV buffers). No-op on hardware
 * without TDMA. All pools are devm-managed, so the early -ENOMEM returns
 * need no explicit teardown.
 */
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	/* TDMA descriptors, 16-byte aligned. */
	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	/* One hash-block-sized cache buffer per in-flight hash request. */
	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	/* 72 bytes: presumably worst-case hash padding — TODO confirm. */
	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	/* 16-byte IV buffers. */
	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
	if (!dma->iv_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
 386
/*
 * Attach SRAM to engine @idx: preferably from the "marvell,crypto-srams"
 * genalloc pool, otherwise by ioremapping the "sram" (or per-engine
 * "sram0"/"sram1") MEM resource. Returns 0 on success, negative errno
 * otherwise.
 */
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		/* Clear the pool so mv_cesa_put_sram() won't free anything. */
		engine->pool = NULL;
		return -ENOMEM;
	}

	/* Dual-engine SoCs name their SRAM resources per engine. */
	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	/* devm-managed mapping: no explicit unmap needed on teardown. */
	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}
 428
 429static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
 430{
 431        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
 432        struct mv_cesa_engine *engine = &cesa->engines[idx];
 433
 434        if (!engine->pool)
 435                return;
 436
 437        gen_pool_free(engine->pool, (unsigned long)engine->sram,
 438                      cesa->sram_size);
 439}
 440
/*
 * Probe: map registers, set up DMA pools, then per engine attach SRAM,
 * clocks and a threaded IRQ, initialize the hardware and finally register
 * the crypto algorithms. On any failure all engines are unwound.
 */
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	/* Only a single CESA device instance is supported. */
	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	/* Select the capability set matching the DT compatible string. */
	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	/*
	 * Orion/Kirkwood are also covered by the old mv_cesa driver: only
	 * take them over when the allhwsupport module parameter is set.
	 */
	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	/* Optional DT override of the SRAM size, clamped to the minimum. */
	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	/* Must be set before mv_cesa_get_sram(), which reads drvdata. */
	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		/*
		 * NOTE(review): if clk_prepare_enable() fails here (or for
		 * zclk below), err_cleanup still calls
		 * clk_disable_unprepare() on the clock that was never
		 * enabled — verify this imbalance.
		 */
		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		/* Quiesce and configure the engine before wiring the IRQ. */
		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		/* NULL primary handler: mv_cesa_int runs in thread context. */
		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	/*
	 * Engines past the failure point are still zero-initialized, so
	 * their clk/zclk are NULL (a no-op for clk_disable_unprepare())
	 * and mv_cesa_put_sram() finds no pool to free.
	 */
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}
 589
 590static int mv_cesa_remove(struct platform_device *pdev)
 591{
 592        struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
 593        int i;
 594
 595        mv_cesa_remove_algs(cesa);
 596
 597        for (i = 0; i < cesa->caps->nengines; i++) {
 598                clk_disable_unprepare(cesa->engines[i].zclk);
 599                clk_disable_unprepare(cesa->engines[i].clk);
 600                mv_cesa_put_sram(pdev, i);
 601        }
 602
 603        return 0;
 604}
 605
static struct platform_driver marvell_cesa = {
	.probe		= mv_cesa_probe,
	.remove		= mv_cesa_remove,
	.driver		= {
		.name	= "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

/* Keep auto-loading working for the legacy mv_crypto platform device. */
MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");
 621