// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* NIST P-521 needs 66 bytes for its 521-bit key, the largest case */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the curve order n */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* hardware key size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key:
	 *	ya = g^xa mod p; (RFC 2631 sec 2.1.1)
	 * else if base is the counterpart's public key we compute the
	 * shared secret:
	 *	ZZ = yb^xa mod p; (RFC 2631 sec 2.1.1)
	 * low address: xa--->p
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* generator g, right-aligned in a key_sz buffer */
	dma_addr_t dma_g;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};
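
/*
 * An hpre_asym_request lives in the private context of each crypto request.
 * Because every .reqsize below reserves
 * sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ bytes, callers recover it
 * with the pattern used throughout this file:
 *
 *	void *tmp = kpp_request_ctx(req);
 *	struct hpre_asym_request *h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
 */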

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}
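
/*
 * Note on the two DMA paths above: a single-entry scatterlist of exactly
 * key_sz bytes is mapped in place with dma_map_single(); anything shorter
 * or fragmented is staged through a coherent bounce buffer so the operand
 * ends up right-aligned (zero-padded at the top) in a key_sz-wide field,
 * which is the operand format the hardware expects. DH source data is
 * always staged so it can be formatted.
 */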

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	GENMASK(10, 0)
#define HREE_SQE_DONE_MASK	GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
			    alg, done, err);

	return -EINVAL;
}
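
/*
 * dw0 layout implied by the masks above (a reading aid derived from this
 * code, not from a hardware spec I can cite): bits [4:0] carry the
 * algorithm type, bits [15:5] the hardware error type, and bits [31:30]
 * the "done" state, where 0x3 means the task completed.
 */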

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}

static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}
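
/*
 * hpre_send() returns 0 once the SQE is queued, or -EBUSY if the queue
 * stayed full for all HPRE_TRY_SEND_TIMES attempts; callers treat 0 as
 * "in flight" and report -EINPROGRESS to the crypto API, as in:
 *
 *	ret = hpre_send(ctx, msg);
 *	if (likely(!ret))
 *		return -EINPROGRESS;
 */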

static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH	1024
#define _RSA_2048BITS_KEY_WDTH	2048
#define _RSA_3072BITS_KEY_WDTH	3072
#define _RSA_4096BITS_KEY_WDTH	4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
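
/*
 * Layout of the CRT private key DMA buffer built above: five fields of
 * hlf_ksz bytes back to back (offsets follow the HPRE_CRT_* constants),
 * each value right-aligned and zero-padded within its field:
 *
 *	| dq | dp | q | p | qinv |
 *	0    1    2   3   4        (x hlf_ksz)
 */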

static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Decide whether this is a CRT key: a non-CRT key encodes each of the five
 * CRT values (p, q, dp, dq, qinv) as at most a one-byte placeholder, so a
 * combined length above LEN_OF_NCRT_PARA indicates a real CRT key.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;
	u8 tmp;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		tmp = data[j];
		data[j] = data[i];
		data[i] = tmp;
	}
}
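
/*
 * hpre_key_to_big_end() is a plain in-place byte reversal; for example a
 * little-endian {0x01, 0x02, 0x03, 0x04} becomes {0x04, 0x03, 0x02, 0x01}.
 * The HPRE hardware consumes big-endian operands, while curve25519 keys
 * arrive little-endian per RFC 7748.
 */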

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports curve widths of 192/224/256/384/521 bits, rounded up to a
 * hardware width: bits <= 256 use 256; 256 < bits <= 384 use 384;
 * 384 < bits <= 576 use 576. Missing high-order bits are zero-filled in
 * software, so TASK_LENGTH1 becomes 0x3/0x5/0x8 respectively.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}

static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}
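
/*
 * fill_curve_param() flattens the kernel's ecc u64 digit array (least
 * significant digit first) into a cur_sz byte string, then reverses it so
 * the hardware sees a big-endian integer. The final digit is copied only
 * partially when cur_sz is not a multiple of 8, as it would be for a
 * 521-bit curve.
 */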

static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	unsigned int sz, sz_shift;
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate a private key if the user did not supply one */
	if (!params.key || !params.key_size) {
		params.key = key;
		params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data (the peer public key) is the concatenation (gx, gy) */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}
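
/*
 * The staging above first copies the raw (x || y) pair into the upper half
 * of the bounce buffer, then splits it into two key_sz-wide fields with each
 * coordinate right-aligned, which is the operand format the hardware point
 * multiplier expects:
 *
 *	| 0-pad gx | 0-pad gy | raw x,y scratch |
 *	0          key_sz     2*key_sz
 */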

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, including both x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}

static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is little-endian; preprocess it as described in
	 * RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then convert
	 * it to big-endian, since the HPRE hardware consumes big-endian
	 * operands. Only then does the result match the kernel's software
	 * curve25519 implementation.
	 */
	memcpy(secret, buf, len);
	curve25519_clamp_secret(secret);
	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);

	p = ctx->curve25519.p + sz - len;

	curve = ecc_get_curve25519();

	/* fill curve parameters */
	fill_curve_param(p, curve->p, len, curve->g.ndigits);
	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
	memcpy(p + shift, secret, len);
	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
	memzero_explicit(secret, CURVE25519_KEY_SIZE);
}

static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* buffer layout: p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}

static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32 bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}

static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The result is ptr's last byte minus 0xed (the last byte of p) */
	ptr[i] -= 0xed;
}
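
/*
 * Why zeroing all but the last byte computes ptr mod p here: this helper is
 * only reached when the (big-endian, MSB-masked) value satisfies
 * p < ptr < 2^255 with p = 2^255 - 19, so ptr - p < 19 and the difference
 * fits entirely in the final byte. All higher bytes of such a ptr
 * necessarily equal those of p (0x7f, 0xff, ..., 0xff), so they cancel.
 */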

static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * Src_data (gx) is little-endian; mask the MSB in the final byte as
	 * described in RFC 7748, then transform it to big-endian form for
	 * the hardware.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * When src_data falls in (2^255 - 19, 2^255 - 1], it exceeds p;
	 * reduce it modulo p before use. src_data equal to p itself is
	 * rejected.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}

static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_nist_p192 = {
	.set_secret = hpre_ecdh_set_secret,
	.generate_public_key = hpre_ecdh_compute_value,
	.compute_shared_secret = hpre_ecdh_compute_value,
	.max_size = hpre_ecdh_max_size,
	.init = hpre_ecdh_nist_p192_init_tfm,
	.exit = hpre_ecdh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "ecdh-nist-p192",
		.cra_driver_name = "hpre-ecdh-nist-p192",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_nist_p256 = {
	.set_secret = hpre_ecdh_set_secret,
	.generate_public_key = hpre_ecdh_compute_value,
	.compute_shared_secret = hpre_ecdh_compute_value,
	.max_size = hpre_ecdh_max_size,
	.init = hpre_ecdh_nist_p256_init_tfm,
	.exit = hpre_ecdh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "ecdh-nist-p256",
		.cra_driver_name = "hpre-ecdh-nist-p256",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_nist_p384 = {
	.set_secret = hpre_ecdh_set_secret,
	.generate_public_key = hpre_ecdh_compute_value,
	.compute_shared_secret = hpre_ecdh_compute_value,
	.max_size = hpre_ecdh_max_size,
	.init = hpre_ecdh_nist_p384_init_tfm,
	.exit = hpre_ecdh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "ecdh-nist-p384",
		.cra_driver_name = "hpre-ecdh-nist-p384",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

static int hpre_register_ecdh(void)
{
	int ret;

	ret = crypto_register_kpp(&ecdh_nist_p192);
	if (ret)
		return ret;

	ret = crypto_register_kpp(&ecdh_nist_p256);
	if (ret)
		goto unregister_ecdh_p192;

	ret = crypto_register_kpp(&ecdh_nist_p384);
	if (ret)
		goto unregister_ecdh_p256;

	return 0;

unregister_ecdh_p256:
	crypto_unregister_kpp(&ecdh_nist_p256);
unregister_ecdh_p192:
	crypto_unregister_kpp(&ecdh_nist_p192);
	return ret;
}

static void hpre_unregister_ecdh(void)
{
	crypto_unregister_kpp(&ecdh_nist_p384);
	crypto_unregister_kpp(&ecdh_nist_p256);
	crypto_unregister_kpp(&ecdh_nist_p192);
}

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		return ret;

	ret = crypto_register_kpp(&dh);
	if (ret)
		goto unreg_rsa;

	if (qm->ver >= QM_HW_V3) {
		ret = hpre_register_ecdh();
		if (ret)
			goto unreg_dh;
		ret = crypto_register_kpp(&curve25519_alg);
		if (ret)
			goto unreg_ecdh;
	}
	return 0;

unreg_ecdh:
	hpre_unregister_ecdh();
unreg_dh:
	crypto_unregister_kpp(&dh);
unreg_rsa:
	crypto_unregister_akcipher(&rsa);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	if (qm->ver >= QM_HW_V3) {
		crypto_unregister_kpp(&curve25519_alg);
		hpre_unregister_ecdh();
	}

	crypto_unregister_kpp(&dh);
	crypto_unregister_akcipher(&rsa);
}
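/*
 * Usage sketch (an illustration under assumptions, not part of this driver):
 * once registered, these algorithms are reached through the generic kernel
 * crypto API, where cra_priority 1000 lets them win over the software
 * implementations, e.g.:
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("curve25519", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
 *		...
 *		crypto_free_kpp(tfm);
 *	}
 */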