linux/drivers/crypto/xilinx/zynqmp-sha.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Xilinx, Inc.
 */

#include <asm/cacheflush.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/mutex.h>

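/*
 * Operation flags passed as the final argument of zynqmp_pm_sha_hash().
 * A digest is computed as one INIT, zero or more UPDATEs, then one FINAL.
 */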
#define ZYNQMP_SHA3_INIT        1
#define ZYNQMP_SHA3_UPDATE      2
#define ZYNQMP_SHA3_FINAL       4

#define ZYNQMP_SHA_QUEUE_LENGTH 1

static struct zynqmp_sha_dev *sha_dd;

/*
 * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
struct zynqmp_sha_reqctx {
        struct zynqmp_sha_dev   *dd;
        unsigned long           flags;
};

struct zynqmp_sha_ctx {
        struct zynqmp_sha_dev   *dd;
        unsigned long           flags;
};

struct zynqmp_sha_dev {
        struct list_head        list;
        struct device           *dev;
        /* the lock protects the queue and the dev list */
        spinlock_t              lock;
        int                     err;

        unsigned long           flags;
        struct crypto_queue     queue;
        struct ahash_request    *req;
};

struct zynqmp_sha_drv {
        struct list_head        dev_list;
        /* the lock protects the queue and the dev list */
        spinlock_t              lock;
        /* the hw_engine_mutex serializes access to the single hw engine */
        struct mutex            hw_engine_mutex;
};

static struct zynqmp_sha_drv zynqmp_sha = {
        .dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
};

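/*
 * Begin a new hash. The hw_engine_mutex is taken here and held across the
 * whole request (the PMU firmware exposes a single SHA3 engine), then the
 * firmware is told to start a fresh SHA3 computation.
 */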
static int zynqmp_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
        struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
        struct zynqmp_sha_dev *dd = sha_dd;
        int ret;

        spin_lock_bh(&zynqmp_sha.lock);
        if (!tctx->dd)
                tctx->dd = dd;
        else
                dd = tctx->dd;
        spin_unlock_bh(&zynqmp_sha.lock);

        ctx->dd = dd;
        dev_dbg(dd->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        ret = mutex_lock_interruptible(&zynqmp_sha.hw_engine_mutex);
        if (ret)
                return ret;

        ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
        if (ret)
                /* Do not leave the engine mutex held on a failed start. */
                mutex_unlock(&zynqmp_sha.hw_engine_mutex);

        return ret;
}

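/*
 * Feed request data to the engine: copy the scatterlist into a DMA-coherent
 * bounce buffer, flush the range, and hand the buffer's bus address to the
 * firmware.
 */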
static int zynqmp_sha_update(struct ahash_request *req)
{
        struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct zynqmp_sha_dev *dd = tctx->dd;
        char *kbuf;
        size_t dma_size = req->nbytes;
        dma_addr_t dma_addr;
        int ret;

        if (!req->nbytes)
                return 0;

        kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
        __flush_cache_user_range((unsigned long)kbuf,
                                 (unsigned long)kbuf + dma_size);
        ret = zynqmp_pm_sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
        if (ret)
                /* Release the engine so a failed request cannot wedge it. */
                mutex_unlock(&zynqmp_sha.hw_engine_mutex);

        /* Free the bounce buffer on both the success and the error path. */
        dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);

        return ret;
}

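/*
 * Finish the hash: the firmware writes the Keccak-384 digest into the
 * bounce buffer, which is copied to req->result, and the engine mutex
 * taken in zynqmp_sha_init() is released.
 */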
static int zynqmp_sha_final(struct ahash_request *req)
{
        struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct zynqmp_sha_dev *dd = tctx->dd;
        char *kbuf;
        size_t dma_size = SHA384_DIGEST_SIZE;
        dma_addr_t dma_addr;
        int ret;

        kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
        if (!kbuf) {
                mutex_unlock(&zynqmp_sha.hw_engine_mutex);
                return -ENOMEM;
        }

        ret = zynqmp_pm_sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
        if (!ret)
                memcpy(req->result, kbuf, dma_size);
        dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);

        mutex_unlock(&zynqmp_sha.hw_engine_mutex);
        return ret;
}

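/*
 * finup and digest are compositions of the primitives above; errors are
 * propagated so that the engine mutex bookkeeping stays balanced.
 */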
static int zynqmp_sha_finup(struct ahash_request *req)
{
        int ret;

        ret = zynqmp_sha_update(req);
        if (ret)
                return ret;

        return zynqmp_sha_final(req);
}

static int zynqmp_sha_digest(struct ahash_request *req)
{
        int ret;

        ret = zynqmp_sha_init(req);
        if (ret)
                return ret;

        ret = zynqmp_sha_update(req);
        if (ret)
                return ret;

        return zynqmp_sha_final(req);
}

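/*
 * Only the software request context is exported/imported; the running
 * hash state itself stays inside the hardware engine.
 */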
static int zynqmp_sha_export(struct ahash_request *req, void *out)
{
        const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(out, ctx, sizeof(*ctx));
        return 0;
}

static int zynqmp_sha_import(struct ahash_request *req, const void *in)
{
        struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);

        memcpy(ctx, in, sizeof(*ctx));
        return 0;
}

static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct zynqmp_sha_reqctx));

        return 0;
}

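/*
 * The engine computes Keccak-384, whose 48-byte digest length matches
 * SHA-384's, which is why the SHA384_* size constants are reused here.
 */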
static struct ahash_alg sha3_alg = {
        .init           = zynqmp_sha_init,
        .update         = zynqmp_sha_update,
        .final          = zynqmp_sha_final,
        .finup          = zynqmp_sha_finup,
        .digest         = zynqmp_sha_digest,
        .export         = zynqmp_sha_export,
        .import         = zynqmp_sha_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                /* must match what export/import actually copy */
                .statesize      = sizeof(struct zynqmp_sha_reqctx),
                .base   = {
                        .cra_name               = "xilinx-keccak-384",
                        .cra_driver_name        = "zynqmp-keccak-384",
                        .cra_priority           = 300,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA384_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct zynqmp_sha_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = zynqmp_sha_cra_init,
                }
        }
};

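/*
 * Illustrative use only (not part of this driver): a kernel consumer would
 * reach the engine through the generic ahash API, for example
 *
 *      struct crypto_ahash *tfm;
 *
 *      tfm = crypto_alloc_ahash("xilinx-keccak-384", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *
 * followed by the usual ahash_request_alloc()/init/update/final sequence
 * and crypto_free_ahash() when done.
 */
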
static const struct of_device_id zynqmp_sha_dt_ids[] = {
        { .compatible = "xlnx,zynqmp-keccak-384" },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);

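/*
 * Probe: allocate the device state, initialize the engine mutex and the
 * request queue, join the driver's device list, and register the ahash.
 */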
static int zynqmp_sha_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int err;

        sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
        if (!sha_dd)
                return -ENOMEM;

        sha_dd->dev = dev;
        platform_set_drvdata(pdev, sha_dd);
        INIT_LIST_HEAD(&sha_dd->list);
        spin_lock_init(&sha_dd->lock);
        mutex_init(&zynqmp_sha.hw_engine_mutex);
        crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
        spin_lock(&zynqmp_sha.lock);
        list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
        spin_unlock(&zynqmp_sha.lock);

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err < 0) {
                dev_err(dev, "no usable DMA configuration\n");
                goto err_algs;
        }

        err = crypto_register_ahash(&sha3_alg);
        if (err)
                goto err_algs;

        return 0;

err_algs:
        spin_lock(&zynqmp_sha.lock);
        list_del(&sha_dd->list);
        spin_unlock(&zynqmp_sha.lock);
        dev_err(dev, "initialization failed.\n");

        return err;
}

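/* Unhook the device from the driver list and unregister the algorithm. */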
static int zynqmp_sha_remove(struct platform_device *pdev)
{
        sha_dd = platform_get_drvdata(pdev);

        if (!sha_dd)
                return -ENODEV;

        spin_lock(&zynqmp_sha.lock);
        list_del(&sha_dd->list);
        spin_unlock(&zynqmp_sha.lock);

        crypto_unregister_ahash(&sha3_alg);

        return 0;
}

static struct platform_driver zynqmp_sha_driver = {
        .probe          = zynqmp_sha_probe,
        .remove         = zynqmp_sha_remove,
        .driver         = {
                .name   = "zynqmp-keccak-384",
                .of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
        },
};

module_platform_driver(zynqmp_sha_driver);

MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");