#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_telemetry.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "cryptodev_pmd.h"
#include "rte_cryptodev_trace.h"

static uint8_t nb_drivers;

static struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = rte_crypto_devices;

static struct rte_cryptodev_global cryptodev_globals = {
	.devs = rte_crypto_devices,
	.data = { NULL },
	.nb_devs = 0
};

struct rte_crypto_fp_ops rte_crypto_fp_ops[RTE_CRYPTO_MAX_DEVS];

static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;

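/*
 * Entry on a device's list of user event callbacks: the function to
 * invoke, its argument and the event type it subscribes to. The
 * "active" flag defers removal while the callback is executing.
 */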
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next;
	rte_cryptodev_cb_fn cb_fn;
	void *cb_arg;
	enum rte_cryptodev_event_type event;
	uint32_t active;
};

const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4] = "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL] = "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3"
};

const char *
rte_crypto_cipher_operation_strings[] = {
	[RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt"
};

const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5] = "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL] = "null",

	[RTE_CRYPTO_AUTH_SHA1] = "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224] = "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256] = "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384] = "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512] = "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
};

const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM] = "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM] = "aes-gcm",
	[RTE_CRYPTO_AEAD_CHACHA20_POLY1305] = "chacha20-poly1305"
};

const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
};

const char *rte_crypto_asym_xform_strings[] = {
	[RTE_CRYPTO_ASYM_XFORM_NONE] = "none",
	[RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa",
	[RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp",
	[RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv",
	[RTE_CRYPTO_ASYM_XFORM_DH] = "dh",
	[RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa",
	[RTE_CRYPTO_ASYM_XFORM_ECDSA] = "ecdsa",
	[RTE_CRYPTO_ASYM_XFORM_ECPM] = "ecpm",
};

const char *rte_crypto_asym_op_strings[] = {
	[RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt",
	[RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt",
	[RTE_CRYPTO_ASYM_OP_SIGN] = "sign",
	[RTE_CRYPTO_ASYM_OP_VERIFY] = "verify"
};

const char *rte_crypto_asym_ke_strings[] = {
	[RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE] = "priv_key_generate",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE] = "pub_key_generate",
	[RTE_CRYPTO_ASYM_KE_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
	[RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY] = "pub_ec_key_verify"
};

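/*
 * Private data of a symmetric session mempool: the number of drivers
 * the sessions were sized for, and the size of the per-session user
 * data area placed after the sess_data[] array.
 */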
struct rte_cryptodev_sym_session_pool_private_data {
	uint16_t nb_drivers;
	uint16_t user_data_sz;
};

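/*
 * Private data of an asymmetric session mempool: the largest per-device
 * private session size the pool can hold, and the per-session user data
 * size.
 */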
struct rte_cryptodev_asym_session_pool_private_data {
	uint16_t max_priv_session_sz;
	uint16_t user_data_sz;
};

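/*
 * String-to-enum parsers for the algorithm name tables above. Each scans
 * from index 1 (index 0 is unused by the algorithm enums) and returns 0
 * on a match, -1 otherwise. Illustrative usage (hypothetical snippet):
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") == 0)
 *		printf("parsed cipher algo %d\n", (int)algo);
 */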
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	return -1;
}

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	return -1;
}

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	return -1;
}

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
		const char *xform_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
		if (strcmp(xform_string,
				rte_crypto_asym_xform_strings[i]) == 0) {
			*xform_enum = (enum rte_crypto_asym_xform_type) i;
			return 0;
		}
	}

	return -1;
}

const char *
rte_crypto_auth_operation_strings[] = {
	[RTE_CRYPTO_AUTH_OP_VERIFY] = "verify",
	[RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
};

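/*
 * Scan a device's capability array (terminated by an entry whose op is
 * RTE_CRYPTO_OP_TYPE_UNDEFINED) for the symmetric capability matching
 * the (xform type, algorithm) index, or return NULL. Illustrative usage
 * (hypothetical values):
 *
 *	struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 *	if (cap != NULL && rte_cryptodev_sym_capability_check_cipher(
 *			cap, 16, 16) == 0)
 *		... 128-bit AES-CBC with a 16-byte IV is supported ...
 */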
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
				capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}

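/*
 * Check a size against a (min, max, increment) range: an increment of 0
 * accepts any size within the bounds (by convention min == max then);
 * otherwise the size must land exactly on a min + n * increment step.
 */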
static int
param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
{
	unsigned int next_size;

	if (size < range->min)
		return -1;

	if (size > range->max)
		return -1;

	if (range->increment == 0)
		return 0;

	for (next_size = range->min; next_size <= range->max;
			next_size += range->increment)
		if (size == next_size)
			return 0;

	return -1;
}

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_asym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	unsigned int i = 0;

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
			continue;

		if (capability->asym.xform_capa.xform_type == idx->type)
			return &capability->asym.xform_capa;
	}
	return NULL;
}

int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->cipher.key_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->auth.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, &capability->aead.key_size) != 0)
		return -1;

	if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
		return -1;

	if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
		return -1;

	if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
		return -1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_optype(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		enum rte_crypto_asym_op_type op_type)
{
	if (capability->op_types & (1 << op_type))
		return 1;

	return 0;
}

int
rte_cryptodev_asym_xform_capability_check_modlen(
		const struct rte_cryptodev_asymmetric_xform_capability *capability,
		uint16_t modlen)
{
	if (capability->modlen.min != 0) {
		if (modlen < capability->modlen.min)
			return -1;
	}

	if (capability->modlen.max != 0) {
		if (modlen > capability->modlen.max)
			return -1;
	}

	if (capability->modlen.increment != 0) {
		if (modlen % (capability->modlen.increment))
			return -1;
	}

	return 0;
}

static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;

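/*
 * Free every enqueue/dequeue callback registered on each queue pair,
 * along with the per-queue RCU QSBR state, and reset both callback
 * arrays. Called with rte_cryptodev_callback_lock held.
 */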
static void
cryptodev_cb_cleanup(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *next;
	uint16_t qp_id;

	if (dev->enq_cbs == NULL && dev->deq_cbs == NULL)
		return;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		cb = list->next;
		while (cb != NULL) {
			next = cb->next;
			rte_free(cb);
			cb = next;
		}

		rte_free(list->qsbr);
	}

	rte_free(dev->enq_cbs);
	dev->enq_cbs = NULL;
	rte_free(dev->deq_cbs);
	dev->deq_cbs = NULL;
}

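/*
 * Allocate the per-queue-pair enqueue/dequeue callback arrays plus one
 * RCU QSBR variable per queue pair; on any failure, unwind everything
 * through cryptodev_cb_cleanup().
 */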
static int
cryptodev_cb_init(struct rte_cryptodev *dev)
{
	struct rte_cryptodev_cb_rcu *list;
	struct rte_rcu_qsbr *qsbr;
	uint16_t qp_id;
	size_t size;

	/* Max thread set to 1, as one data-path thread accesses a queue pair */
	const uint32_t max_threads = 1;

	dev->enq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for enq callbacks");
		return -ENOMEM;
	}

	dev->deq_cbs = rte_zmalloc(NULL,
			sizeof(struct rte_cryptodev_cb_rcu) *
			dev->data->nb_queue_pairs, 0);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for deq callbacks");
		rte_free(dev->enq_cbs);
		/* don't leave a dangling pointer for cryptodev_cb_cleanup() */
		dev->enq_cbs = NULL;
		return -ENOMEM;
	}

	/* Create one RCU QSBR variable per queue pair */
	size = rte_rcu_qsbr_get_memsize(max_threads);

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->enq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		list = &dev->deq_cbs[qp_id];
		qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
		if (qsbr == NULL) {
			CDEV_LOG_ERR("Failed to allocate memory for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		if (rte_rcu_qsbr_init(qsbr, max_threads)) {
			CDEV_LOG_ERR("Failed to initialize for RCU on "
				"queue_pair_id=%d", qp_id);
			goto cb_init_err;
		}

		list->qsbr = qsbr;
	}

	return 0;

cb_init_err:
	cryptodev_cb_cleanup(dev);
	return -ENOMEM;
}

const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AVX512:
		return "CPU_AVX512";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
		return "IN_PLACE_SGL";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
		return "OOP_LB_IN_LB_OUT";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	case RTE_CRYPTODEV_FF_SECURITY:
		return "SECURITY_PROTOCOL";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP:
		return "RSA_PRIV_OP_KEY_EXP";
	case RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT:
		return "RSA_PRIV_OP_KEY_QT";
	case RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED:
		return "DIGEST_ENCRYPTED";
	case RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO:
		return "SYM_CPU_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYM_SESSIONLESS:
		return "ASYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_SYM_SESSIONLESS:
		return "SYM_SESSIONLESS";
	case RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA:
		return "NON_BYTE_ALIGNED_DATA";
	case RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS:
		return "CIPHER_MULTIPLE_DATA_UNITS";
	case RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY:
		return "CIPHER_WRAPPED_KEY";
	default:
		return NULL;
	}
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &cryptodev_globals.devs[dev_id];
}

struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		dev = &cryptodev_globals.devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}

static inline uint8_t
rte_cryptodev_is_valid_device_data(uint8_t dev_id)
{
	if (dev_id >= RTE_CRYPTO_MAX_DEVS ||
			rte_crypto_devices[dev_id].data == NULL)
		return 0;

	return 1;
}

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (!rte_cryptodev_is_valid_device_data(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;
		if ((strcmp(cryptodev_globals.devs[i].data->name, name) == 0) &&
				(cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;
	}

	return -1;
}

uint8_t
rte_cryptodev_count(void)
{
	return cryptodev_globals.nb_devs;
}

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++)
		if (cryptodev_globals.devs[i].driver_id == driver_id &&
				cryptodev_globals.devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = cryptodev_globals.devs;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS && count < nb_devices; i++) {
		if (!rte_cryptodev_is_valid_device_data(i))
			continue;

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name) + 1);

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id)
{
	if (dev_id < RTE_CRYPTO_MAX_DEVS &&
			(rte_crypto_devices[dev_id].feature_flags &
					RTE_CRYPTODEV_FF_SECURITY))
		return rte_crypto_devices[dev_id].security_ctx;

	return NULL;
}

int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}

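/*
 * Allocate (primary process) or look up (secondary process) the shared
 * rte_cryptodev_data memzone of a device, named "rte_cryptodev_data_<id>",
 * so that both process types map the same device state.
 */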
static inline int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
		CDEV_LOG_DEBUG("PRIMARY:reserved memzone for %s (%p)",
				mz_name, mz);
	} else {
		mz = rte_memzone_lookup(mz_name);
		CDEV_LOG_DEBUG("SECONDARY:looked up memzone for %s (%p)",
				mz_name, mz);
	}

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}

static inline int
rte_cryptodev_data_free(uint8_t dev_id, struct rte_cryptodev_data **data)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int n;

	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		return -ENOMEM;

	RTE_ASSERT(*data == mz->addr);
	*data = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		CDEV_LOG_DEBUG("PRIMARY:free memzone of %s (%p)",
				mz_name, mz);
		return rte_memzone_free(mz);
	} else {
		CDEV_LOG_DEBUG("SECONDARY:don't free memzone of %s (%p)",
				mz_name, mz);
	}

	return 0;
}

static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}

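/*
 * Reserve a free device slot for a new PMD instance: attach the shared
 * data memzone, initialise it when running as the primary process, and
 * mark the slot attached.
 */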
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data **cryptodev_data =
				&cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, cryptodev_data,
				socket_id);

		if (retval < 0 || *cryptodev_data == NULL)
			return NULL;

		cryptodev->data = *cryptodev_data;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			strlcpy(cryptodev->data->name, name,
					RTE_CRYPTODEV_NAME_MAX_LEN);

			cryptodev->data->dev_id = dev_id;
			cryptodev->data->socket_id = socket_id;
			cryptodev->data->dev_started = 0;
			CDEV_LOG_DEBUG("PRIMARY:init data");
		}

		CDEV_LOG_DEBUG("Data for %s: dev_id %d, socket %d, started %d",
				cryptodev->data->name,
				cryptodev->data->dev_id,
				cryptodev->data->socket_id,
				cryptodev->data->dev_started);

		TAILQ_INIT(&(cryptodev->link_intr_cbs));

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}

int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;
	uint8_t dev_id;

	if (cryptodev == NULL)
		return -EINVAL;

	dev_id = cryptodev->data->dev_id;

	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(dev_id);
		if (ret < 0)
			return ret;
	}

	ret = rte_cryptodev_data_free(dev_id, &cryptodev_globals.data[dev_id]);
	if (ret < 0)
		return ret;

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return 0;
	}

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}

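/*
 * Grow or shrink a device's queue-pair array. The array itself is
 * allocated once, sized for dev_info.max_nb_queue_pairs; shrinking
 * releases the queue pairs above the new count, while new queue pairs
 * are only instantiated later by queue_pair_setup().
 */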
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
				dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) *
						dev_info.max_nb_queue_pairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u",
					nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
			qp[i] = NULL;
		}
	}
	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}

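/*
 * Configure a stopped device: tear down any registered enqueue/dequeue
 * callbacks, (re)build the queue-pair array, re-initialise the callback
 * state and finally hand the configuration to the PMD's dev_configure op.
 */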
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
			"device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	cryptodev_cb_cleanup(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	diag = cryptodev_cb_init(dev);
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	if (diag) {
		CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
		return diag;
	}

	rte_cryptodev_trace_configure(dev_id, config);
	return (*dev->dev_ops->dev_configure)(dev, config);
}

int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);

	/* expose the selected PMD fast-path functions */
	cryptodev_fp_ops_set(rte_crypto_fp_ops + dev_id, dev);

	rte_cryptodev_trace_start(dev_id, diag);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				dev_id);
		return;
	}

	/* point fast-path functions at dummy ones before stopping */
	cryptodev_fp_ops_reset(rte_crypto_fp_ops + dev_id);

	(*dev->dev_ops->dev_stop)(dev);
	rte_cryptodev_trace_stop(dev_id);
	dev->data->dev_started = 0;
}

int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);
	rte_cryptodev_trace_close(dev_id, retval);

	if (retval < 0)
		return retval;

	return 0;
}

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}
	void **qps = dev->data->queue_pairs;

	if (qps[queue_pair_id]) {
		CDEV_LOG_DEBUG("qp %d on dev %d is initialised",
				queue_pair_id, dev_id);
		return 1;
	}

	CDEV_LOG_DEBUG("qp %d on dev %d is not initialised",
			queue_pair_id, dev_id);

	return 0;
}

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (!qp_conf) {
		CDEV_LOG_ERR("qp_conf cannot be NULL");
		return -EINVAL;
	}

	if ((qp_conf->mp_session && !qp_conf->mp_session_private) ||
			(!qp_conf->mp_session && qp_conf->mp_session_private)) {
		CDEV_LOG_ERR("Invalid mempools");
		return -EINVAL;
	}

	if (qp_conf->mp_session) {
		struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
		uint32_t obj_size = qp_conf->mp_session->elt_size;
		uint32_t obj_priv_size = qp_conf->mp_session_private->elt_size;
		struct rte_cryptodev_sym_session s = {0};

		pool_priv = rte_mempool_get_priv(qp_conf->mp_session);
		if (!pool_priv || qp_conf->mp_session->private_data_size <
				sizeof(*pool_priv)) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}

		s.nb_drivers = pool_priv->nb_drivers;
		s.user_data_sz = pool_priv->user_data_sz;

		if ((rte_cryptodev_sym_get_existing_header_session_size(&s) >
				obj_size) || (s.nb_drivers <= dev->driver_id) ||
				rte_cryptodev_sym_get_private_session_size(dev_id) >
						obj_priv_size) {
			CDEV_LOG_ERR("Invalid mempool");
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
			"device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	rte_cryptodev_trace_queue_pair_setup(dev_id, queue_pair_id, qp_conf);
	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}

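/*
 * Append a user callback to a queue pair's enqueue-callback list. The
 * head and next pointers are published with release stores so that
 * data-path readers, protected by the per-queue RCU QSBR variable, only
 * ever observe fully initialised nodes. Illustrative usage (count_ops
 * and counter are hypothetical):
 *
 *	static uint16_t
 *	count_ops(uint16_t dev_id, uint16_t qp_id,
 *			struct rte_crypto_op **ops, uint16_t nb_ops, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_ops;
 *		return nb_ops;
 *	}
 *
 *	cb = rte_cryptodev_add_enq_callback(dev_id, 0, count_ops, &counter);
 */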
struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
		uint16_t qp_id,
		rte_cryptodev_callback_fn cb_fn,
		void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
				"dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	list = &dev->enq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;

		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
		uint16_t qp_id,
		struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->enq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->enq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			__atomic_store_n(prev_cb, curr_cb->next,
					__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
		uint16_t qp_id,
		rte_cryptodev_callback_fn cb_fn,
		void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb_rcu *list;
	struct rte_cryptodev_cb *cb, *tail;

	if (!cb_fn) {
		CDEV_LOG_ERR("Callback is NULL on dev_id=%d", dev_id);
		rte_errno = EINVAL;
		return NULL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		rte_errno = ENODEV;
		return NULL;
	}

	cb = rte_zmalloc(NULL, sizeof(*cb), 0);
	if (cb == NULL) {
		CDEV_LOG_ERR("Failed to allocate memory for callback on "
				"dev=%d, queue_pair_id=%d", dev_id, qp_id);
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);

	cb->fn = cb_fn;
	cb->arg = cb_arg;

	list = &dev->deq_cbs[qp_id];
	tail = list->next;

	if (tail) {
		while (tail->next)
			tail = tail->next;

		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
	} else {
		__atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
	}

	rte_spinlock_unlock(&rte_cryptodev_callback_lock);

	return cb;
}

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
		uint16_t qp_id,
		struct rte_cryptodev_cb *cb)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_cb **prev_cb, *curr_cb;
	struct rte_cryptodev_cb_rcu *list;
	int ret;

	ret = -EINVAL;

	if (!cb) {
		CDEV_LOG_ERR("Callback is NULL");
		return -EINVAL;
	}

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	dev = &rte_crypto_devices[dev_id];
	if (qp_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
		return -ENODEV;
	}

	rte_spinlock_lock(&rte_cryptodev_callback_lock);
	if (dev->deq_cbs == NULL) {
		CDEV_LOG_ERR("Callback not initialized");
		goto cb_err;
	}

	list = &dev->deq_cbs[qp_id];
	if (list == NULL) {
		CDEV_LOG_ERR("Callback list is NULL");
		goto cb_err;
	}

	if (list->qsbr == NULL) {
		CDEV_LOG_ERR("Rcu qsbr is NULL");
		goto cb_err;
	}

	prev_cb = &list->next;
	for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
		curr_cb = *prev_cb;
		if (curr_cb == cb) {
			__atomic_store_n(prev_cb, curr_cb->next,
					__ATOMIC_RELAXED);
			ret = 0;
			break;
		}
	}

	if (!ret) {
		rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
		rte_free(cb);
	}

cb_err:
	rte_spinlock_unlock(&rte_cryptodev_callback_lock);
	return ret;
}

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
	dev_info->device = dev->device;
}

int
rte_cryptodev_callback_register(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event) {
			break;
		}
	}

	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
						cb->cb_arg != cb_arg))
			continue;

		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}

void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
		enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}

int
rte_cryptodev_sym_session_init(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		struct rte_crypto_sym_xform *xforms,
		struct rte_mempool *mp)
{
	struct rte_cryptodev *dev;
	uint32_t sess_priv_sz = rte_cryptodev_sym_get_private_session_size(
			dev_id);
	uint8_t index;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (sess == NULL || xforms == NULL || dev == NULL || mp == NULL)
		return -EINVAL;

	if (mp->elt_size < sess_priv_sz)
		return -EINVAL;

	index = dev->driver_id;
	if (index >= sess->nb_drivers)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);

	if (sess->sess_data[index].refcnt == 0) {
		ret = dev->dev_ops->sym_session_configure(dev, xforms,
				sess, mp);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_sym_session_init(dev_id, sess, xforms, mp);
	sess->sess_data[index].refcnt++;
	return 0;
}

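/*
 * Create a mempool of symmetric session headers. The element size is
 * the larger of the caller's elt_size and header size + user_data_size,
 * and the pool's private area records nb_drivers and user_data_sz so
 * that queue-pair setup and session creation can validate compatibility.
 * Illustrative sizing (hypothetical numbers):
 *
 *	mp = rte_cryptodev_sym_session_pool_create("sess_hdr_pool", 1024,
 *			0, 32, 16, rte_socket_id());
 */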
struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
		uint32_t elt_size, uint32_t cache_size, uint16_t user_data_size,
		int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;
	uint32_t obj_sz;

	obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size;
	if (obj_sz > elt_size)
		CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size,
				obj_sz);
	else
		obj_sz = elt_size;

	mp = rte_mempool_create(name, nb_elts, obj_sz, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
				__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
				__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}

	pool_priv->nb_drivers = nb_drivers;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_sym_session_pool_create(name, nb_elts,
			elt_size, cache_size, user_data_size, mp);
	return mp;
}

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
		uint32_t cache_size, uint16_t user_data_size, int socket_id)
{
	struct rte_mempool *mp;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	uint32_t obj_sz, obj_sz_aligned;
	uint8_t dev_id;
	unsigned int priv_sz, max_priv_sz = 0;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id)) {
			priv_sz = rte_cryptodev_asym_get_private_session_size(
					dev_id);
			if (priv_sz > max_priv_sz)
				max_priv_sz = priv_sz;
		}
	if (max_priv_sz == 0) {
		CDEV_LOG_INFO("Could not set max private session size");
		return NULL;
	}

	obj_sz = rte_cryptodev_asym_get_header_session_size() + max_priv_sz +
			user_data_size;
	obj_sz_aligned = RTE_ALIGN_CEIL(obj_sz, RTE_CACHE_LINE_SIZE);

	mp = rte_mempool_create(name, nb_elts, obj_sz_aligned, cache_size,
			(uint32_t)(sizeof(*pool_priv)),
			NULL, NULL, NULL, NULL,
			socket_id, 0);
	if (mp == NULL) {
		CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d",
				__func__, name, rte_errno);
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);
	if (!pool_priv) {
		CDEV_LOG_ERR("%s(name=%s) failed to get private data",
				__func__, name);
		rte_mempool_free(mp);
		return NULL;
	}
	pool_priv->max_priv_session_sz = max_priv_sz;
	pool_priv->user_data_sz = user_data_size;

	rte_cryptodev_trace_asym_session_pool_create(name, nb_elts,
			user_data_size, cache_size, mp);
	return mp;
}

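/*
 * A symmetric session header is followed in memory by one sess_data[]
 * slot per registered driver and then by the user data area; this
 * returns the size of that variable-length tail.
 */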
static unsigned int
rte_cryptodev_sym_session_data_size(struct rte_cryptodev_sym_session *sess)
{
	return (sizeof(sess->sess_data[0]) * sess->nb_drivers) +
			sess->user_data_sz;
}

static uint8_t
rte_cryptodev_sym_is_valid_session_pool(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!mp)
		return 0;

	pool_priv = rte_mempool_get_priv(mp);

	if (!pool_priv || mp->private_data_size < sizeof(*pool_priv) ||
			pool_priv->nb_drivers != nb_drivers ||
			mp->elt_size <
				rte_cryptodev_sym_get_header_session_size()
					+ pool_priv->user_data_sz)
		return 0;

	return 1;
}

struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mp)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_cryptodev_sym_session_pool_private_data *pool_priv;

	if (!rte_cryptodev_sym_is_valid_session_pool(mp)) {
		CDEV_LOG_ERR("Invalid mempool");
		return NULL;
	}

	pool_priv = rte_mempool_get_priv(mp);

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, (void **)&sess)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return NULL;
	}

	sess->nb_drivers = pool_priv->nb_drivers;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->opaque_data = 0;

	/* Clear device session pointers and the user data area */
	memset(sess->sess_data, 0,
			rte_cryptodev_sym_session_data_size(sess));

	rte_cryptodev_trace_sym_session_create(mp, sess);
	return sess;
}

int
rte_cryptodev_asym_session_create(uint8_t dev_id,
		struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
		void **session)
{
	struct rte_cryptodev_asym_session *sess;
	uint32_t session_priv_data_sz;
	struct rte_cryptodev_asym_session_pool_private_data *pool_priv;
	unsigned int session_header_size =
			rte_cryptodev_asym_get_header_session_size();
	struct rte_cryptodev *dev;
	int ret;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL)
		return -EINVAL;

	if (!mp) {
		CDEV_LOG_ERR("invalid mempool");
		return -EINVAL;
	}

	session_priv_data_sz = rte_cryptodev_asym_get_private_session_size(
			dev_id);
	pool_priv = rte_mempool_get_priv(mp);

	if (pool_priv->max_priv_session_sz < session_priv_data_sz) {
		CDEV_LOG_DEBUG(
			"The private session data size used when creating the mempool is smaller than this device's private session data.");
		return -EINVAL;
	}

	/* Verify that the mempool elements are big enough */
	if (mp->elt_size < session_header_size + session_priv_data_sz) {
		CDEV_LOG_ERR("mempool elements too small to hold session objects");
		return -EINVAL;
	}

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(mp, session)) {
		CDEV_LOG_ERR("couldn't get object from session mempool");
		return -ENOMEM;
	}

	sess = *session;
	sess->driver_id = dev->driver_id;
	sess->user_data_sz = pool_priv->user_data_sz;
	sess->max_priv_data_sz = pool_priv->max_priv_session_sz;

	/* Clear device session data and the user data area */
	memset(sess->sess_private_data, 0,
			session_priv_data_sz + sess->user_data_sz);

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure, -ENOTSUP);

	if (sess->sess_private_data[0] == 0) {
		ret = dev->dev_ops->asym_session_configure(dev, xforms, sess);
		if (ret < 0) {
			CDEV_LOG_ERR(
				"dev_id %d failed to configure session details",
				dev_id);
			return ret;
		}
	}

	rte_cryptodev_trace_asym_session_create(dev_id, xforms, mp, sess);
	return 0;
}

int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;
	uint8_t driver_id;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	driver_id = dev->driver_id;
	if (sess->sess_data[driver_id].refcnt == 0)
		return 0;
	if (--sess->sess_data[driver_id].refcnt != 0)
		return -EBUSY;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);

	dev->dev_ops->sym_session_clear(dev, sess);

	rte_cryptodev_trace_sym_session_clear(dev_id, sess);
	return 0;
}

int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
{
	uint8_t i;
	struct rte_mempool *sess_mp;

	if (sess == NULL)
		return -EINVAL;

	/* Check that all device private data has been freed */
	for (i = 0; i < sess->nb_drivers; i++) {
		if (sess->sess_data[i].refcnt != 0)
			return -EBUSY;
	}

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_sym_session_free(sess);
	return 0;
}

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
{
	struct rte_mempool *sess_mp;
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (dev == NULL || sess == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);

	dev->dev_ops->asym_session_clear(dev, sess);

	rte_free(((struct rte_cryptodev_asym_session *)sess)->event_mdata);

	/* Return session to mempool */
	sess_mp = rte_mempool_from_obj(sess);
	rte_mempool_put(sess_mp, sess);

	rte_cryptodev_trace_asym_session_free(dev_id, sess);
	return 0;
}

unsigned int
rte_cryptodev_sym_get_header_session_size(void)
{
	/*
	 * The header holds one private-data slot per registered driver
	 * plus the bookkeeping needed to clear or free the session safely.
	 */
	struct rte_cryptodev_sym_session s = {0};

	s.nb_drivers = nb_drivers;

	return (unsigned int)(sizeof(s) +
			rte_cryptodev_sym_session_data_size(&s));
}

unsigned int
rte_cryptodev_sym_get_existing_header_session_size(
		struct rte_cryptodev_sym_session *sess)
{
	if (!sess)
		return 0;
	else
		return (unsigned int)(sizeof(*sess) +
				rte_cryptodev_sym_session_data_size(sess));
}

unsigned int
rte_cryptodev_asym_get_header_session_size(void)
{
	return sizeof(struct rte_cryptodev_asym_session);
}

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->sym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);

	return priv_sess_size;
}

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	unsigned int priv_sess_size;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	if (*dev->dev_ops->asym_session_get_size == NULL)
		return 0;

	priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);

	return priv_sess_size;
}

int
rte_cryptodev_sym_session_set_user_data(
		struct rte_cryptodev_sym_session *sess,
		void *data,
		uint16_t size)
{
	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_data + sess->nb_drivers, data, size);
	return 0;
}

void *
rte_cryptodev_sym_session_get_user_data(
		struct rte_cryptodev_sym_session *sess)
{
	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_data + sess->nb_drivers);
}

int
rte_cryptodev_asym_session_set_user_data(void *session, void *data, uint16_t size)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL)
		return -EINVAL;

	if (sess->user_data_sz < size)
		return -ENOMEM;

	rte_memcpy(sess->sess_private_data +
			sess->max_priv_data_sz,
			data, size);
	return 0;
}

void *
rte_cryptodev_asym_session_get_user_data(void *session)
{
	struct rte_cryptodev_asym_session *sess = session;

	if (sess == NULL || sess->user_data_sz == 0)
		return NULL;

	return (void *)(sess->sess_private_data +
			sess->max_priv_data_sz);
}

static inline void
sym_crypto_fill_status(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
	uint32_t i;

	for (i = 0; i < vec->num; i++)
		vec->status[i] = errnum;
}

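/*
 * Synchronous CPU crypto path: pass a vector of buffers directly to the
 * PMD's sym_cpu_process op. On an invalid device or missing support,
 * every element's status is marked instead of returning an error code.
 */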
2203uint32_t
2204rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
2205 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
2206 struct rte_crypto_sym_vec *vec)
2207{
2208 struct rte_cryptodev *dev;
2209
2210 if (!rte_cryptodev_is_valid_dev(dev_id)) {
2211 sym_crypto_fill_status(vec, EINVAL);
2212 return 0;
2213 }
2214
2215 dev = rte_cryptodev_pmd_get_dev(dev_id);
2216
2217 if (*dev->dev_ops->sym_cpu_process == NULL ||
2218 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO)) {
2219 sym_crypto_fill_status(vec, ENOTSUP);
2220 return 0;
2221 }
2222
2223 return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
2224}
2225
2226int
2227rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
2228{
2229 struct rte_cryptodev *dev;
2230 int32_t size = sizeof(struct rte_crypto_raw_dp_ctx);
2231 int32_t priv_size;
2232
2233 if (!rte_cryptodev_is_valid_dev(dev_id))
2234 return -EINVAL;
2235
2236 dev = rte_cryptodev_pmd_get_dev(dev_id);
2237
2238 if (*dev->dev_ops->sym_get_raw_dp_ctx_size == NULL ||
2239 !(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)) {
2240 return -ENOTSUP;
2241 }
2242
2243 priv_size = (*dev->dev_ops->sym_get_raw_dp_ctx_size)(dev);
2244 if (priv_size < 0)
2245 return -ENOTSUP;
2246
2247 return RTE_ALIGN_CEIL((size + priv_size), 8);
2248}

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
		return -EINVAL;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP)
			|| dev->dev_ops->sym_configure_raw_dp_ctx == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->sym_configure_raw_dp_ctx)(dev, qp_id, ctx,
			sess_type, session_ctx, is_update);
}

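/*
 * Attach event metadata to a session. If the PMD implements the
 * session_ev_mdata_set operation the call is delegated to it; otherwise
 * the metadata is stored as session user data (symmetric sessions) or
 * in a dedicated allocation (asymmetric sessions).
 */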
int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata,
	uint16_t size)
{
	struct rte_cryptodev *dev;

	if (sess == NULL || ev_mdata == NULL)
		return -EINVAL;

	if (!rte_cryptodev_is_valid_dev(dev_id))
		goto skip_pmd_op;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->dev_ops->session_ev_mdata_set == NULL)
		goto skip_pmd_op;

	return (*dev->dev_ops->session_ev_mdata_set)(dev, sess, op_type,
			sess_type, ev_mdata);

skip_pmd_op:
	if (op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
		return rte_cryptodev_sym_session_set_user_data(sess, ev_mdata,
				size);
	else if (op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_cryptodev_asym_session *s = sess;

		if (s->event_mdata == NULL) {
			s->event_mdata = rte_malloc(NULL, size, 0);
			if (s->event_mdata == NULL)
				return -ENOMEM;
		}
		rte_memcpy(s->event_mdata, ev_mdata, size);

		return 0;
	} else
		return -ENOTSUP;
}

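/*
 * The raw data-path calls below are thin wrappers: each one invokes the
 * driver callback installed into the context by
 * rte_cryptodev_configure_raw_dp_ctx().
 */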
uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status)
{
	return (*ctx->enqueue_burst)(ctx->qp_data, ctx->drv_ctx_data, vec,
			ofs, user_data, enqueue_status);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->enqueue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success_jobs, int *status)
{
	return (*ctx->dequeue_burst)(ctx->qp_data, ctx->drv_ctx_data,
			get_dequeue_count, max_nb_to_dequeue, post_dequeue,
			out_user_data, is_user_data_array, n_success_jobs,
			status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
		uint32_t n)
{
	return (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_ctx_data, n);
}
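/*
 * A sketch of the overall raw data-path flow, assuming "ctx" was sized
 * via rte_cryptodev_get_raw_dp_ctx_size() and configured for a queue
 * pair (descriptor setup and error handling elided):
 *
 *	uint32_t n = rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs,
 *			user_data, &enq_status);
 *	rte_cryptodev_raw_enqueue_done(ctx, n);	// commit the enqueue
 *	...
 *	n = rte_cryptodev_raw_dequeue_burst(ctx, get_count, max_nb,
 *			post_deq, out_user_data, 1, &n_success, &status);
 *	rte_cryptodev_raw_dequeue_done(ctx, n);	// release the descriptors
 */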
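/* Mempool constructor: initialise one element as a crypto operation. */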
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

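/*
 * Create a mempool of crypto operations. Each element holds the
 * rte_crypto_op header, the (a)symmetric payload and priv_size bytes of
 * application-private data. An existing pool with the same name is
 * reused, provided its parameters are compatible.
 */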
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size, uint16_t priv_size,
		int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			priv_size;

	if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_sym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		elt_size += sizeof(struct rte_crypto_asym_op);
	} else if (type == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op),
				sizeof(struct rte_crypto_asym_op));
	} else {
		CDEV_LOG_ERR("Invalid op_type");
		return NULL;
	}

	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
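/*
 * A minimal sketch of creating and drawing from an op pool (the pool
 * name and sizes are illustrative only):
 *
 *	struct rte_mempool *pool = rte_crypto_op_pool_create("op_pool",
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 8192, 128, 0,
 *			rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */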

int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}

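/* List of crypto drivers registered with this library. */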
TAILQ_HEAD(cryptodev_driver_list, cryptodev_driver);

static struct cryptodev_driver_list cryptodev_driver_list =
		TAILQ_HEAD_INITIALIZER(cryptodev_driver_list);

int
rte_cryptodev_driver_id_get(const char *name)
{
	struct cryptodev_driver *driver;
	const char *driver_name;

	if (name == NULL) {
		RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL\n");
		return -1;
	}

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next) {
		driver_name = driver->driver->name;
		if (strncmp(driver_name, name, strlen(driver_name) + 1) == 0)
			return driver->id;
	}
	return -1;
}
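/*
 * Example lookup (the driver name depends on the PMDs compiled in, so
 * treat the string below as illustrative):
 *
 *	int id = rte_cryptodev_driver_id_get("crypto_aesni_mb");
 */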

const char *
rte_cryptodev_name_get(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_is_valid_device_data(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return NULL;
	}

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev == NULL)
		return NULL;

	return dev->data->name;
}

const char *
rte_cryptodev_driver_name_get(uint8_t driver_id)
{
	struct cryptodev_driver *driver;

	TAILQ_FOREACH(driver, &cryptodev_driver_list, next)
		if (driver->id == driver_id)
			return driver->driver->name;
	return NULL;
}

uint8_t
rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
		const struct rte_driver *drv)
{
	crypto_drv->driver = drv;
	crypto_drv->id = nb_drivers;

	TAILQ_INSERT_TAIL(&cryptodev_driver_list, crypto_drv, next);

	return nb_drivers++;
}
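/*
 * Note: PMDs normally reach rte_cryptodev_allocate_driver() through the
 * RTE_PMD_REGISTER_CRYPTO_DRIVER() constructor macro rather than by
 * calling it directly.
 */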
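/* Reset every fast-path ops slot to its default handlers at startup. */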
RTE_INIT(cryptodev_init_fp_ops)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(rte_crypto_fp_ops); i++)
		cryptodev_fp_ops_reset(rte_crypto_fp_ops + i);
}

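/* Telemetry callback: list the IDs of all valid crypto devices. */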
static int
cryptodev_handle_dev_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int dev_id;

	if (rte_cryptodev_count() < 1)
		return -EINVAL;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++)
		if (rte_cryptodev_is_valid_dev(dev_id))
			rte_tel_data_add_array_int(d, dev_id);

	return 0;
}

static int
cryptodev_handle_dev_info(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_cryptodev_info cryptodev_info;
	int dev_id;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_cryptodev_info_get(dev_id, &cryptodev_info);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "device_name",
			cryptodev_info.device->name);
	rte_tel_data_add_dict_int(d, "max_nb_queue_pairs",
			cryptodev_info.max_nb_queue_pairs);

	return 0;
}
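/* Add one rte_cryptodev_stats field to the telemetry dict, keyed by its name. */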
#define ADD_DICT_STAT(s) rte_tel_data_add_dict_u64(d, #s, cryptodev_stats.s)

static int
cryptodev_handle_dev_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_stats cryptodev_stats;
	int dev_id, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	ret = rte_cryptodev_stats_get(dev_id, &cryptodev_stats);
	if (ret < 0)
		return ret;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(enqueued_count);
	ADD_DICT_STAT(dequeued_count);
	ADD_DICT_STAT(enqueue_err_count);
	ADD_DICT_STAT(dequeue_err_count);

	return 0;
}
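/*
 * Number of 64-bit words needed to carry one rte_cryptodev_capabilities
 * entry over telemetry: the struct size rounded up to a multiple of
 * sizeof(uint64_t), expressed in uint64_t units.
 */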
#define CRYPTO_CAPS_SZ \
	(RTE_ALIGN_CEIL(sizeof(struct rte_cryptodev_capabilities), \
		sizeof(uint64_t)) / \
	sizeof(uint64_t))

static int
crypto_caps_array(struct rte_tel_data *d,
		const struct rte_cryptodev_capabilities *capabilities)
{
	const struct rte_cryptodev_capabilities *dev_caps;
	uint64_t caps_val[CRYPTO_CAPS_SZ];
	unsigned int i = 0, j;

	rte_tel_data_start_array(d, RTE_TEL_U64_VAL);

	while ((dev_caps = &capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		memset(&caps_val, 0, CRYPTO_CAPS_SZ * sizeof(caps_val[0]));
		rte_memcpy(caps_val, dev_caps, sizeof(capabilities[0]));
		for (j = 0; j < CRYPTO_CAPS_SZ; j++)
			rte_tel_data_add_array_u64(d, caps_val[j]);
	}

	return i - 1;
}

static int
cryptodev_handle_dev_caps(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_cryptodev_info dev_info;
	struct rte_tel_data *crypto_caps;
	int crypto_caps_n;
	char *end_param;
	int dev_id;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	dev_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		CDEV_LOG_ERR("Extra parameters passed to command, ignoring");
	if (!rte_cryptodev_is_valid_dev(dev_id))
		return -EINVAL;

	rte_tel_data_start_dict(d);
	crypto_caps = rte_tel_data_alloc();
	if (!crypto_caps)
		return -ENOMEM;

	rte_cryptodev_info_get(dev_id, &dev_info);
	crypto_caps_n = crypto_caps_array(crypto_caps, dev_info.capabilities);
	rte_tel_data_add_dict_container(d, "crypto_caps", crypto_caps, 0);
	rte_tel_data_add_dict_int(d, "crypto_caps_n", crypto_caps_n);

	return 0;
}

RTE_INIT(cryptodev_init_telemetry)
{
	rte_telemetry_register_cmd("/cryptodev/info",
			cryptodev_handle_dev_info,
			"Returns information for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/list",
			cryptodev_handle_dev_list,
			"Returns list of available crypto devices by IDs. No parameters.");
	rte_telemetry_register_cmd("/cryptodev/stats",
			cryptodev_handle_dev_stats,
			"Returns the stats for a cryptodev. Parameters: int dev_id");
	rte_telemetry_register_cmd("/cryptodev/caps",
			cryptodev_handle_dev_caps,
			"Returns the capabilities for a cryptodev. Parameters: int dev_id");
}

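/*
 * These endpoints can be queried with the standard telemetry client
 * while an application is running, e.g.:
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /cryptodev/list
 *	--> /cryptodev/stats,0
 */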