#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

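/*
 * Completion callback for a DH request: release the DMA mappings or bounce
 * buffers set up by qat_dh_compute_value(), copy the result back to the
 * caller's scatterlist when a bounce buffer was used, and complete the kpp
 * request with the firmware status.
 */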
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

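/*
 * Firmware PKE function IDs for the supported DH modulus sizes; each size
 * has a generic-base variant and a dedicated g = 2 variant.
 */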
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

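	/*
	 * With a source buffer this computes the shared secret (base = peer
	 * public value); without one it generates the public key, taking g
	 * as the base and using the dedicated g = 2 function when possible.
	 */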
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
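		/*
		 * src may be any length in the valid range, but the hardware
		 * expects exactly p_size bytes; bounce through a zeroed,
		 * right-aligned DMA buffer if the single scatterlist entry
		 * does not match, otherwise map the caller's buffer directly.
		 */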
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
							ctx->p_size,
							&qat_req->in.dh.in.b,
							GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}

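	/*
	 * The destination must likewise be a contiguous p_size-byte buffer;
	 * fall back to a bounce buffer (copied out on completion) when the
	 * caller's scatterlist does not provide one.
	 */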
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;

	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}

static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (unlikely(!params->p || !params->g))
		return -EINVAL;

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

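	/*
	 * A generator of exactly 2 is handled by the dedicated g = 2
	 * firmware functions, so g itself does not need to be copied into
	 * DMA memory.
	 */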
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

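	/* Free any previously installed parameters and private key. */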
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		return ret;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		qat_dh_clear_ctx(dev, ctx);
		return -ENOMEM;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;
}

static int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p ? ctx->p_size : -EINVAL;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

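/*
 * Completion callback for an RSA request: release the DMA mappings or
 * bounce buffers set up by qat_rsa_enc()/qat_rsa_dec(), copy the result
 * back to the caller's scatterlist when a bounce buffer was used, and
 * complete the akcipher request with the firmware status.
 */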
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

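/*
 * Firmware PKE function IDs keyed by modulus size: PKE_RSA_EP_* are used
 * for the public-key operation (qat_rsa_enc), PKE_RSA_DP1_* for the
 * standard private-key operation and PKE_RSA_DP2_* for the CRT form
 * (qat_rsa_dec).
 */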
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

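	/*
	 * src may be any length in the valid range, but the hardware expects
	 * exactly key_sz bytes; bounce through a zeroed, right-aligned DMA
	 * buffer if the single scatterlist entry does not match, otherwise
	 * map the caller's buffer directly.
	 */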
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

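	/*
	 * As in qat_rsa_enc(): map the caller's buffer directly when it is a
	 * single key_sz-byte scatterlist entry, otherwise bounce the data
	 * through a zeroed DMA buffer, right-aligned to key_sz bytes.
	 */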
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

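	/* p */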
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

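	/* q */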
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

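	/* dp */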
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				      GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

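	/* dq */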
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				      GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

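	/* qinv */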
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
					GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

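	/* Free all key components; private material is zeroed first. */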
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return (ctx->n) ? ctx->key_sz : -EINVAL;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}