#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
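	/*
	 * If no peer value (req->src) is provided, exponentiate the generator
	 * to produce our public key; otherwise use the peer value as the base
	 * to compute the shared secret.
	 */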
296 if (req->src) {
297 qat_req->in.dh.in.xa = ctx->dma_xa;
298 qat_req->in.dh.in.p = ctx->dma_p;
299 n_input_params = 3;
300 } else {
301 if (ctx->g2) {
302 qat_req->in.dh.in_g2.xa = ctx->dma_xa;
303 qat_req->in.dh.in_g2.p = ctx->dma_p;
304 n_input_params = 2;
305 } else {
306 qat_req->in.dh.in.b = ctx->dma_g;
307 qat_req->in.dh.in.xa = ctx->dma_xa;
308 qat_req->in.dh.in.p = ctx->dma_p;
309 n_input_params = 3;
310 }
311 }
312
313 ret = -ENOMEM;
314 if (req->src) {
315
316
317
318
319
320
321
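		/*
		 * The source may be shorter than the modulus and may be
		 * scattered; the hardware expects a flat buffer of exactly
		 * p_size bytes, so either map the user buffer directly or
		 * allocate a zeroed bounce buffer and copy the data in,
		 * right-aligned.
		 */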
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
								 ctx->p_size,
								 &qat_req->in.dh.in.b,
								 GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
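	/*
	 * Likewise for the destination: map it directly only when it is a
	 * single, p_size-sized entry; otherwise use a coherent bounce buffer
	 * and copy the result back on completion.
	 */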
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;

	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}

static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);
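	/* If the generator equals 2, don't copy it: the dedicated G2 PKE functions are used instead */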
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;
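	/* Free any old parameters and private key before installing new ones */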
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}

static int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p_size;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;
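	/*
	 * The source may be shorter than the modulus and may be scattered;
	 * the hardware expects a flat buffer of exactly key_sz bytes, so
	 * either map the user buffer directly or allocate a zeroed bounce
	 * buffer and copy the data in, right-aligned.
	 */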
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;
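	/*
	 * The ciphertext may be shorter than the modulus and may be
	 * scattered; the hardware expects a flat buffer of exactly key_sz
	 * bytes, so either map the user buffer directly or allocate a zeroed
	 * bounce buffer and copy the data in, right-aligned.
	 */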
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;

	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;
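	/* CRT prime factor p */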
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);
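	/* CRT prime factor q */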
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);
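	/* CRT exponent dp = d mod (p - 1) */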
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				      GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);
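	/* CRT exponent dq = d mod (q - 1) */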
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				      GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);
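	/* CRT coefficient qinv = q^-1 mod p */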
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
					GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;
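	/* Free the old key material, if any, zeroing the private parts first */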
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
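		/* invalid key provided */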
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
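		/* invalid private key provided */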
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}