/* linux/drivers/crypto/xilinx/zynqmp-rsa.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (C) 2017 Xilinx, Inc.
   4 */
   5
#include <linux/crypto.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
  21
/*
 * Driver limits: a single-slot request queue, the maximum size in bytes of
 * the key blob copied into the per-tfm context by zynqmp_setkey_blk(), and
 * the block size advertised to the crypto API.
 */
#define ZYNQMP_RSA_QUEUE_LENGTH 1
#define ZYNQMP_RSA_MAX_KEY_SIZE 1024
#define ZYNQMP_RSA_BLOCKSIZE    64

/* The single device instance; set once in zynqmp_rsa_probe(). */
static struct zynqmp_rsa_dev *rsa_dd;
  27
/*
 * Per-transform (tfm) context.
 * @dd:     device the context is bound to; assigned lazily by
 *          zynqmp_rsa_find_dev() on first use
 * @src:    scratch pointer to the current walk's source buffer
 *          (only written inside zynqmp_rsa_xcrypt())
 * @dst:    destination scratch pointer — NOTE(review): never used in this
 *          file, candidate for removal once external users are ruled out
 * @len:    NOTE(review): never used in this file either
 * @key:    raw key bytes copied in by zynqmp_setkey_blk()
 * @iv:     IV pointer — unused here; presumably kept for API symmetry,
 *          TODO confirm
 * @keylen: number of valid bytes in @key
 */
struct zynqmp_rsa_op {
        struct zynqmp_rsa_dev    *dd;
        void *src;
        void *dst;
        int len;
        u8 key[ZYNQMP_RSA_MAX_KEY_SIZE];
        u8 *iv;
        u32 keylen;
};
  37
/*
 * Per-device state, allocated in probe and linked onto
 * zynqmp_rsa.dev_list.
 * @list:  node in the driver-wide device list
 * @dev:   backing platform device, used for DMA allocations
 * @lock:  protects @queue (the queue is initialized but not otherwise
 *         used in this file)
 * @queue: crypto request queue of length ZYNQMP_RSA_QUEUE_LENGTH
 */
struct zynqmp_rsa_dev {
        struct list_head        list;
        struct device           *dev;
        /* the lock protects queue and dev list*/
        spinlock_t              lock;
        struct crypto_queue     queue;
};
  45
/*
 * Driver-wide singleton state.
 * @dev_list: list of probed zynqmp_rsa_dev instances
 * @lock:     protects @dev_list and the lazy context-to-device binding in
 *            zynqmp_rsa_find_dev()
 */
struct zynqmp_rsa_drv {
        struct list_head        dev_list;
        /* the lock protects queue and dev list*/
        spinlock_t              lock;
};
  51
/* Statically initialized driver singleton: empty device list, open lock. */
static struct zynqmp_rsa_drv zynqmp_rsa = {
        .dev_list = LIST_HEAD_INIT(zynqmp_rsa.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(zynqmp_rsa.lock),
};
  56
  57static struct zynqmp_rsa_dev *zynqmp_rsa_find_dev(struct zynqmp_rsa_op *ctx)
  58{
  59        struct zynqmp_rsa_dev *dd = rsa_dd;
  60
  61        spin_lock_bh(&zynqmp_rsa.lock);
  62        if (!ctx->dd)
  63                ctx->dd = dd;
  64        else
  65                dd = ctx->dd;
  66        spin_unlock_bh(&zynqmp_rsa.lock);
  67
  68        return dd;
  69}
  70
  71static int zynqmp_setkey_blk(struct crypto_skcipher *tfm, const u8 *key,
  72                             unsigned int len)
  73{
  74        struct zynqmp_rsa_op *op = crypto_skcipher_ctx(tfm);
  75
  76        op->keylen = len;
  77        memcpy(op->key, key, len);
  78        return 0;
  79}
  80
  81static int zynqmp_rsa_xcrypt(struct skcipher_request *req, unsigned int flags)
  82{
  83        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  84        struct zynqmp_rsa_op *op = crypto_skcipher_ctx(tfm);
  85        struct zynqmp_rsa_dev *dd = zynqmp_rsa_find_dev(op);
  86        int err, datasize, src_data = 0, dst_data = 0;
  87        struct skcipher_walk walk = {0};
  88        unsigned int nbytes;
  89        char *kbuf;
  90        size_t dma_size;
  91        dma_addr_t dma_addr;
  92
  93        nbytes = req->cryptlen;
  94        dma_size = nbytes + op->keylen;
  95        kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
  96        if (!kbuf)
  97                return -ENOMEM;
  98
  99        err = skcipher_walk_virt(&walk, req, false);
 100        if (err)
 101                goto out;
 102
 103        while ((datasize = walk.nbytes)) {
 104                op->src = walk.src.virt.addr;
 105                memcpy(kbuf + src_data, op->src, datasize);
 106                src_data = src_data + datasize;
 107                err = skcipher_walk_done(&walk, 0);
 108                if (err)
 109                        goto out;
 110        }
 111        memcpy(kbuf + nbytes, op->key, op->keylen);
 112        zynqmp_pm_rsa(dma_addr, nbytes, flags);
 113
 114        err = skcipher_walk_virt(&walk, req, false);
 115        if (err)
 116                goto out;
 117
 118        while ((datasize = walk.nbytes)) {
 119                memcpy(walk.dst.virt.addr, kbuf + dst_data, datasize);
 120                dst_data = dst_data + datasize;
 121                err = skcipher_walk_done(&walk, 0);
 122        }
 123
 124out:
 125        dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
 126        return err;
 127}
 128
 129static int zynqmp_rsa_decrypt(struct skcipher_request *req)
 130{
 131        return zynqmp_rsa_xcrypt(req, 0);
 132}
 133
 134static int zynqmp_rsa_encrypt(struct skcipher_request *req)
 135{
 136        return zynqmp_rsa_xcrypt(req, 1);
 137}
 138
/*
 * Algorithm registration.  The RSA primitive is exposed through the
 * skcipher interface rather than akcipher.
 * NOTE(review): .min_keysize of 0 and .ivsize of 1 look suspicious for an
 * RSA transform (no IV is ever consumed in this file) — confirm against
 * the userspace consumers before changing.
 */
static struct skcipher_alg zynqmp_alg = {
        .base.cra_name          =       "xilinx-zynqmp-rsa",
        .base.cra_driver_name   =       "zynqmp-rsa",
        .base.cra_priority      =       400,
        .base.cra_flags         =       CRYPTO_ALG_TYPE_SKCIPHER |
                                        CRYPTO_ALG_KERN_DRIVER_ONLY,
        .base.cra_blocksize     =       ZYNQMP_RSA_BLOCKSIZE,
        .base.cra_ctxsize       =       sizeof(struct zynqmp_rsa_op),
        .base.cra_alignmask     =       15,
        .base.cra_module        =       THIS_MODULE,
        .min_keysize            =       0,
        .max_keysize            =       ZYNQMP_RSA_MAX_KEY_SIZE,
        .setkey                 =       zynqmp_setkey_blk,
        .encrypt                =       zynqmp_rsa_encrypt,
        .decrypt                =       zynqmp_rsa_decrypt,
        .ivsize                 =       1,
};
 156
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id zynqmp_rsa_dt_ids[] = {
        { .compatible = "xlnx,zynqmp-rsa" },
        { /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, zynqmp_rsa_dt_ids);
 163
 164static int zynqmp_rsa_probe(struct platform_device *pdev)
 165{
 166        struct device *dev = &pdev->dev;
 167        int ret;
 168
 169        rsa_dd = devm_kzalloc(&pdev->dev, sizeof(*rsa_dd), GFP_KERNEL);
 170        if (!rsa_dd)
 171                return -ENOMEM;
 172
 173        rsa_dd->dev = dev;
 174        platform_set_drvdata(pdev, rsa_dd);
 175
 176        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 177        if (ret < 0)
 178                dev_err(dev, "no usable DMA configuration");
 179
 180        INIT_LIST_HEAD(&rsa_dd->list);
 181        spin_lock_init(&rsa_dd->lock);
 182        crypto_init_queue(&rsa_dd->queue, ZYNQMP_RSA_QUEUE_LENGTH);
 183        spin_lock(&zynqmp_rsa.lock);
 184        list_add_tail(&rsa_dd->list, &zynqmp_rsa.dev_list);
 185        spin_unlock(&zynqmp_rsa.lock);
 186
 187        ret = crypto_register_skcipher(&zynqmp_alg);
 188        if (ret)
 189                goto err_algs;
 190
 191        return 0;
 192
 193err_algs:
 194        spin_lock(&zynqmp_rsa.lock);
 195        list_del(&rsa_dd->list);
 196        spin_unlock(&zynqmp_rsa.lock);
 197        dev_err(dev, "initialization failed.\n");
 198        return ret;
 199}
 200
 201static int zynqmp_rsa_remove(struct platform_device *pdev)
 202{
 203        crypto_unregister_skcipher(&zynqmp_alg);
 204        return 0;
 205}
 206
/* Platform driver glue plus standard module boilerplate. */
static struct platform_driver xilinx_rsa_driver = {
        .probe = zynqmp_rsa_probe,
        .remove = zynqmp_rsa_remove,
        .driver = {
                .name = "zynqmp_rsa",
                .of_match_table = of_match_ptr(zynqmp_rsa_dt_ids),
        },
};

module_platform_driver(xilinx_rsa_driver);

MODULE_DESCRIPTION("ZynqMP RSA hw acceleration support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
 221