// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Xilinx ZynqMP SHA3 hardware acceleration support.
 *
 * Author: Nava kishore Manne <navam@xilinx.com>
 */

#include <asm/cacheflush.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/firmware/xilinx/zynqmp/firmware.h>

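/* Operation stages understood by the PMU firmware's SHA3 call */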
#define ZYNQMP_SHA3_INIT	1
#define ZYNQMP_SHA3_UPDATE	2
#define ZYNQMP_SHA3_FINAL	4

#define ZYNQMP_SHA_QUEUE_LENGTH	1

struct zynqmp_sha_dev;

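/*
 * Per-request and per-transform contexts only record which device instance
 * services them; the actual hash state lives in the PMU firmware.
 */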
struct zynqmp_sha_reqctx {
	struct zynqmp_sha_dev *dd;
	unsigned long flags;
};

struct zynqmp_sha_ctx {
	struct zynqmp_sha_dev *dd;
	unsigned long flags;
};

struct zynqmp_sha_dev {
	struct list_head list;
	struct device *dev;

	spinlock_t lock;
	int err;

	unsigned long flags;
	struct crypto_queue queue;
	struct ahash_request *req;
};

struct zynqmp_sha_drv {
	struct list_head dev_list;

	spinlock_t lock;
};

static struct zynqmp_sha_drv zynqmp_sha = {
	.dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
};

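/*
 * Start a new hash: bind the transform to the first registered device if it
 * has none yet, then ask the firmware to (re)initialize its SHA3 state.
 */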
static int zynqmp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	struct zynqmp_sha_dev *dd = NULL;
	struct zynqmp_sha_dev *tmp;

	if (!eemi_ops || !eemi_ops->sha_hash)
		return -ENOTSUPP;

	spin_lock_bh(&zynqmp_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &zynqmp_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&zynqmp_sha.lock);

	ctx->dd = dd;
	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	return eemi_ops->sha_hash(0, 0, ZYNQMP_SHA3_INIT);
}

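/*
 * Feed data to the firmware: the scattered source is copied into a
 * DMA-coherent bounce buffer whose device address is handed to the PMU.
 */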
static int zynqmp_sha_update(struct ahash_request *req)
{
	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct zynqmp_sha_dev *dd = tctx->dd;
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	char *kbuf;
	size_t dma_size = req->nbytes;
	dma_addr_t dma_addr;
	int ret;

	if (!req->nbytes)
		return 0;

	if (!eemi_ops || !eemi_ops->sha_hash)
		return -ENOTSUPP;

	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
	/* Ensure the copied data is observable by the PMU before the call */
	__flush_cache_user_range((unsigned long)kbuf,
				 (unsigned long)kbuf + dma_size);
	ret = eemi_ops->sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);

	return ret;
}

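/*
 * Finish the hash: the firmware writes the SHA384_DIGEST_SIZE-byte digest
 * to the supplied DMA address, from where it is copied to the caller.
 */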
static int zynqmp_sha_final(struct ahash_request *req)
{
	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct zynqmp_sha_dev *dd = tctx->dd;
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	char *kbuf;
	size_t dma_size = SHA384_DIGEST_SIZE;
	dma_addr_t dma_addr;
	int ret;

	if (!eemi_ops || !eemi_ops->sha_hash)
		return -ENOTSUPP;

	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = eemi_ops->sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
	if (!ret)
		memcpy(req->result, kbuf, SHA384_DIGEST_SIZE);
	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);

	return ret;
}

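/* The combined operations chain the steps above, stopping at the first error */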
static int zynqmp_sha_finup(struct ahash_request *req)
{
	int ret;

	ret = zynqmp_sha_update(req);
	if (ret)
		return ret;

	return zynqmp_sha_final(req);
}

static int zynqmp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = zynqmp_sha_init(req);
	if (ret)
		return ret;

	return zynqmp_sha_finup(req);
}

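/*
 * Export/import snapshot only the small request context; the in-progress
 * hash state inside the firmware cannot be extracted.
 */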
static int zynqmp_sha_export(struct ahash_request *req, void *out)
{
	const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int zynqmp_sha_import(struct ahash_request *req, const void *in)
{
	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

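/* Tell the ahash core how much per-request context memory to reserve */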
static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct zynqmp_sha_reqctx));

	return 0;
}

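/*
 * The hardware digest is exposed under a Xilinx-specific "keccak-384" name;
 * its digest length matches SHA-384 (48 bytes), which is why the SHA384_*
 * size constants are reused below.
 */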
static struct ahash_alg sha3_alg = {
	.init = zynqmp_sha_init,
	.update = zynqmp_sha_update,
	.final = zynqmp_sha_final,
	.finup = zynqmp_sha_finup,
	.digest = zynqmp_sha_digest,
	.export = zynqmp_sha_export,
	.import = zynqmp_sha_import,
	.halg = {
		.digestsize = SHA384_DIGEST_SIZE,
		.statesize = sizeof(struct zynqmp_sha_reqctx),
		.base = {
			.cra_name = "xilinx-keccak-384",
			.cra_driver_name = "zynqmp-keccak-384",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = zynqmp_sha_cra_init,
		}
	}
};

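/*
 * Example use from another kernel module (a minimal sketch: error handling
 * is omitted, "data"/"len" are placeholders, and the one-shot digest call
 * assumes the input fits in a single scatterlist entry):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("xilinx-keccak-384", 0, 0);
 *	struct ahash_request *rq = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA384_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(rq, 0, NULL, NULL);
 *	ahash_request_set_crypt(rq, &sg, digest, len);
 *	crypto_ahash_digest(rq);
 *	ahash_request_free(rq);
 *	crypto_free_ahash(tfm);
 */
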
static const struct of_device_id zynqmp_sha_dt_ids[] = {
	{ .compatible = "xlnx,zynqmp-keccak-384" },
	{ }
};

MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);

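/*
 * Probe: put the new device on the driver list, set up a 44-bit DMA mask
 * for the bounce buffers, then register the ahash algorithm.
 */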
static int zynqmp_sha_probe(struct platform_device *pdev)
{
	struct zynqmp_sha_dev *sha_dd;
	struct device *dev = &pdev->dev;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (!sha_dd)
		return -ENOMEM;

	sha_dd->dev = dev;
	platform_set_drvdata(pdev, sha_dd);
	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);
	crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
	spin_lock(&zynqmp_sha.lock);
	list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
	spin_unlock(&zynqmp_sha.lock);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (err < 0) {
		dev_err(dev, "no usable DMA configuration\n");
		goto err_algs;
	}

	err = crypto_register_ahash(&sha3_alg);
	if (err)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&zynqmp_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&zynqmp_sha.lock);
	dev_err(dev, "initialization failed.\n");

	return err;
}

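/* On unbind, drop the device from the list and unregister the algorithm */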
static int zynqmp_sha_remove(struct platform_device *pdev)
{
	struct zynqmp_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;

	spin_lock(&zynqmp_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&zynqmp_sha.lock);

	crypto_unregister_ahash(&sha3_alg);

	return 0;
}

static struct platform_driver zynqmp_sha_driver = {
	.probe = zynqmp_sha_probe,
	.remove = zynqmp_sha_remove,
	.driver = {
		.name = "zynqmp-keccak-384",
		.of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
	},
};

module_platform_driver(zynqmp_sha_driver);

MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");