1
2
3
4
5#include <fcntl.h>
6#include <unistd.h>
7#include <sched.h>
8#include <net/if.h>
9
10#include <rte_byteorder.h>
11#include <rte_common.h>
12#include <cryptodev_pmd.h>
13#include <rte_crypto.h>
14#include <rte_cryptodev.h>
15#include <bus_vdev_driver.h>
16#include <rte_malloc.h>
17#include <rte_security_driver.h>
18#include <rte_hexdump.h>
19
20#include <caam_jr_capabilities.h>
21#include <caam_jr_config.h>
22#include <caam_jr_hw_specific.h>
23#include <caam_jr_pvt.h>
24#include <caam_jr_desc.h>
25#include <caam_jr_log.h>
26
27
28#include <desc/common.h>
29#include <desc/algo.h>
30#include <dpaa_of.h>
31#ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32#define CAAM_JR_DBG 1
33#else
34#define CAAM_JR_DBG 0
35#endif
36#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
37static uint8_t cryptodev_driver_id;
38
39
40enum sec_driver_state_e {
41 SEC_DRIVER_STATE_IDLE,
42 SEC_DRIVER_STATE_STARTED,
43 SEC_DRIVER_STATE_RELEASE,
44};
45
46
47static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
48
49
50static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
51
52
53static int g_job_rings_no;
54static int g_job_rings_max;
55
56struct sec_outring_entry {
57 phys_addr_t desc;
58 uint32_t status;
59} __rte_packed;
60
61
62static inline phys_addr_t
63caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
64{
65 return (size_t)vaddr - ctx->vtop_offset;
66}
67
68static inline void
69caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
70{
71
72 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
73}
74
75static inline struct caam_jr_op_ctx *
76caam_jr_alloc_ctx(struct caam_jr_session *ses)
77{
78 struct caam_jr_op_ctx *ctx;
79 int ret;
80
81 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
82 if (!ctx || ret) {
83 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
84 return NULL;
85 }
86
87
88
89
90
91
92 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
93 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
94 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
95 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
96
97 ctx->ctx_pool = ses->ctx_pool;
98 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
99
100 return ctx;
101}
102
103static
104void caam_jr_stats_get(struct rte_cryptodev *dev,
105 struct rte_cryptodev_stats *stats)
106{
107 struct caam_jr_qp **qp = (struct caam_jr_qp **)
108 dev->data->queue_pairs;
109 int i;
110
111 PMD_INIT_FUNC_TRACE();
112 if (stats == NULL) {
113 CAAM_JR_ERR("Invalid stats ptr NULL");
114 return;
115 }
116 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
117 if (qp[i] == NULL) {
118 CAAM_JR_WARN("Uninitialised queue pair");
119 continue;
120 }
121
122 stats->enqueued_count += qp[i]->tx_pkts;
123 stats->dequeued_count += qp[i]->rx_pkts;
124 stats->enqueue_err_count += qp[i]->tx_errs;
125 stats->dequeue_err_count += qp[i]->rx_errs;
126 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
127 "\n\tTX Ring Full = %" PRIu64,
128 qp[i]->rx_poll_err,
129 qp[i]->tx_ring_full);
130 }
131}
132
133static
134void caam_jr_stats_reset(struct rte_cryptodev *dev)
135{
136 int i;
137 struct caam_jr_qp **qp = (struct caam_jr_qp **)
138 (dev->data->queue_pairs);
139
140 PMD_INIT_FUNC_TRACE();
141 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
142 if (qp[i] == NULL) {
143 CAAM_JR_WARN("Uninitialised queue pair");
144 continue;
145 }
146 qp[i]->rx_pkts = 0;
147 qp[i]->rx_errs = 0;
148 qp[i]->rx_poll_err = 0;
149 qp[i]->tx_pkts = 0;
150 qp[i]->tx_errs = 0;
151 qp[i]->tx_ring_full = 0;
152 }
153}
154
155static inline int
156is_cipher_only(struct caam_jr_session *ses)
157{
158 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
159 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
160}
161
162static inline int
163is_auth_only(struct caam_jr_session *ses)
164{
165 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
166 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
167}
168
169static inline int
170is_aead(struct caam_jr_session *ses)
171{
172 return ((ses->cipher_alg == 0) &&
173 (ses->auth_alg == 0) &&
174 (ses->aead_alg != 0));
175}
176
177static inline int
178is_auth_cipher(struct caam_jr_session *ses)
179{
180 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
181 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
182 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
183}
184
185static inline int
186is_proto_ipsec(struct caam_jr_session *ses)
187{
188 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
189}
190
191static inline int
192is_encode(struct caam_jr_session *ses)
193{
194 return ses->dir == DIR_ENC;
195}
196
197static inline int
198is_decode(struct caam_jr_session *ses)
199{
200 return ses->dir == DIR_DEC;
201}
202
203static inline void
204caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
205{
206 switch (ses->auth_alg) {
207 case RTE_CRYPTO_AUTH_NULL:
208 ses->digest_length = 0;
209 break;
210 case RTE_CRYPTO_AUTH_MD5_HMAC:
211 alginfo_a->algtype =
212 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
213 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
214 alginfo_a->algmode = OP_ALG_AAI_HMAC;
215 break;
216 case RTE_CRYPTO_AUTH_SHA1_HMAC:
217 alginfo_a->algtype =
218 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
219 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
220 alginfo_a->algmode = OP_ALG_AAI_HMAC;
221 break;
222 case RTE_CRYPTO_AUTH_SHA224_HMAC:
223 alginfo_a->algtype =
224 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
225 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
226 alginfo_a->algmode = OP_ALG_AAI_HMAC;
227 break;
228 case RTE_CRYPTO_AUTH_SHA256_HMAC:
229 alginfo_a->algtype =
230 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
231 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
232 alginfo_a->algmode = OP_ALG_AAI_HMAC;
233 break;
234 case RTE_CRYPTO_AUTH_SHA384_HMAC:
235 alginfo_a->algtype =
236 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
237 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
238 alginfo_a->algmode = OP_ALG_AAI_HMAC;
239 break;
240 case RTE_CRYPTO_AUTH_SHA512_HMAC:
241 alginfo_a->algtype =
242 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
243 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
244 alginfo_a->algmode = OP_ALG_AAI_HMAC;
245 break;
246 default:
247 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
248 }
249}
250
251static inline void
252caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
253{
254 switch (ses->cipher_alg) {
255 case RTE_CRYPTO_CIPHER_NULL:
256 break;
257 case RTE_CRYPTO_CIPHER_AES_CBC:
258 alginfo_c->algtype =
259 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
260 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
261 alginfo_c->algmode = OP_ALG_AAI_CBC;
262 break;
263 case RTE_CRYPTO_CIPHER_3DES_CBC:
264 alginfo_c->algtype =
265 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
266 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
267 alginfo_c->algmode = OP_ALG_AAI_CBC;
268 break;
269 case RTE_CRYPTO_CIPHER_AES_CTR:
270 alginfo_c->algtype =
271 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
272 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
273 alginfo_c->algmode = OP_ALG_AAI_CTR;
274 break;
275 default:
276 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
277 }
278}
279
280static inline void
281caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
282{
283 switch (ses->aead_alg) {
284 case RTE_CRYPTO_AEAD_AES_GCM:
285 alginfo->algtype = OP_ALG_ALGSEL_AES;
286 alginfo->algmode = OP_ALG_AAI_GCM;
287 break;
288 default:
289 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
290 }
291}
292
293
294static int
295caam_jr_prep_cdb(struct caam_jr_session *ses)
296{
297 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
298 int32_t shared_desc_len = 0;
299 struct sec_cdb *cdb;
300 int err;
301#if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
302 int swap = false;
303#else
304 int swap = true;
305#endif
306
307 if (ses->cdb)
308 caam_jr_dma_free(ses->cdb);
309
310 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
311 if (!cdb) {
312 CAAM_JR_ERR("failed to allocate memory for cdb\n");
313 return -1;
314 }
315
316 ses->cdb = cdb;
317
318 memset(cdb, 0, sizeof(struct sec_cdb));
319
320 if (is_cipher_only(ses)) {
321 caam_cipher_alg(ses, &alginfo_c);
322 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
323 CAAM_JR_ERR("not supported cipher alg");
324 rte_free(cdb);
325 return -ENOTSUP;
326 }
327
328 alginfo_c.key = (size_t)ses->cipher_key.data;
329 alginfo_c.keylen = ses->cipher_key.length;
330 alginfo_c.key_enc_flags = 0;
331 alginfo_c.key_type = RTA_DATA_IMM;
332
333 shared_desc_len = cnstr_shdsc_blkcipher(
334 cdb->sh_desc, true,
335 swap, SHR_NEVER, &alginfo_c,
336 ses->iv.length,
337 ses->dir);
338 } else if (is_auth_only(ses)) {
339 caam_auth_alg(ses, &alginfo_a);
340 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
341 CAAM_JR_ERR("not supported auth alg");
342 rte_free(cdb);
343 return -ENOTSUP;
344 }
345
346 alginfo_a.key = (size_t)ses->auth_key.data;
347 alginfo_a.keylen = ses->auth_key.length;
348 alginfo_a.key_enc_flags = 0;
349 alginfo_a.key_type = RTA_DATA_IMM;
350
351 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
352 swap, SHR_NEVER, &alginfo_a,
353 !ses->dir,
354 ses->digest_length);
355 } else if (is_aead(ses)) {
356 caam_aead_alg(ses, &alginfo);
357 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
358 CAAM_JR_ERR("not supported aead alg");
359 rte_free(cdb);
360 return -ENOTSUP;
361 }
362 alginfo.key = (size_t)ses->aead_key.data;
363 alginfo.keylen = ses->aead_key.length;
364 alginfo.key_enc_flags = 0;
365 alginfo.key_type = RTA_DATA_IMM;
366
367 if (ses->dir == DIR_ENC)
368 shared_desc_len = cnstr_shdsc_gcm_encap(
369 cdb->sh_desc, true, swap,
370 SHR_NEVER, &alginfo,
371 ses->iv.length,
372 ses->digest_length);
373 else
374 shared_desc_len = cnstr_shdsc_gcm_decap(
375 cdb->sh_desc, true, swap,
376 SHR_NEVER, &alginfo,
377 ses->iv.length,
378 ses->digest_length);
379 } else {
380 caam_cipher_alg(ses, &alginfo_c);
381 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
382 CAAM_JR_ERR("not supported cipher alg");
383 rte_free(cdb);
384 return -ENOTSUP;
385 }
386
387 alginfo_c.key = (size_t)ses->cipher_key.data;
388 alginfo_c.keylen = ses->cipher_key.length;
389 alginfo_c.key_enc_flags = 0;
390 alginfo_c.key_type = RTA_DATA_IMM;
391
392 caam_auth_alg(ses, &alginfo_a);
393 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
394 CAAM_JR_ERR("not supported auth alg");
395 rte_free(cdb);
396 return -ENOTSUP;
397 }
398
399 alginfo_a.key = (size_t)ses->auth_key.data;
400 alginfo_a.keylen = ses->auth_key.length;
401 alginfo_a.key_enc_flags = 0;
402 alginfo_a.key_type = RTA_DATA_IMM;
403
404 cdb->sh_desc[0] = alginfo_c.keylen;
405 cdb->sh_desc[1] = alginfo_a.keylen;
406 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
407 MIN_JOB_DESC_SIZE,
408 (unsigned int *)cdb->sh_desc,
409 &cdb->sh_desc[2], 2);
410
411 if (err < 0) {
412 CAAM_JR_ERR("Crypto: Incorrect key lengths");
413 rte_free(cdb);
414 return err;
415 }
416 if (cdb->sh_desc[2] & 1)
417 alginfo_c.key_type = RTA_DATA_IMM;
418 else {
419 alginfo_c.key = (size_t)caam_jr_mem_vtop(
420 (void *)(size_t)alginfo_c.key);
421 alginfo_c.key_type = RTA_DATA_PTR;
422 }
423 if (cdb->sh_desc[2] & (1<<1))
424 alginfo_a.key_type = RTA_DATA_IMM;
425 else {
426 alginfo_a.key = (size_t)caam_jr_mem_vtop(
427 (void *)(size_t)alginfo_a.key);
428 alginfo_a.key_type = RTA_DATA_PTR;
429 }
430 cdb->sh_desc[0] = 0;
431 cdb->sh_desc[1] = 0;
432 cdb->sh_desc[2] = 0;
433 if (is_proto_ipsec(ses)) {
434 if (ses->dir == DIR_ENC) {
435 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
436 cdb->sh_desc,
437 true, swap, SHR_SERIAL,
438 &ses->encap_pdb,
439 (uint8_t *)&ses->ip4_hdr,
440 &alginfo_c, &alginfo_a);
441 } else if (ses->dir == DIR_DEC) {
442 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
443 cdb->sh_desc,
444 true, swap, SHR_SERIAL,
445 &ses->decap_pdb,
446 &alginfo_c, &alginfo_a);
447 }
448 } else {
449
450 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
451 true, swap, SHR_SERIAL,
452 &alginfo_c, &alginfo_a,
453 ses->iv.length,
454 ses->digest_length, ses->dir);
455 }
456 }
457
458 if (shared_desc_len < 0) {
459 CAAM_JR_ERR("error in preparing command block");
460 return shared_desc_len;
461 }
462
463#if CAAM_JR_DBG
464 SEC_DUMP_DESC(cdb->sh_desc);
465#endif
466
467 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
468
469 return 0;
470}
471
472
473
474
475
476
477
478
479
480
481
482
483static void
484hw_flush_job_ring(struct sec_job_ring_t *job_ring,
485 uint32_t do_notify,
486 uint32_t *notified_descs)
487{
488 int32_t jobs_no_to_discard = 0;
489 int32_t discarded_descs_no = 0;
490
491 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
492 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
493
494 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
495
496
497 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
498 job_ring, job_ring->pidx, job_ring->cidx,
499 jobs_no_to_discard);
500
501 while (jobs_no_to_discard > discarded_descs_no) {
502 discarded_descs_no++;
503
504
505
506
507 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
508 SEC_JOB_RING_SIZE);
509
510 hw_remove_entries(job_ring, 1);
511 }
512
513 if (do_notify == true) {
514 ASSERT(notified_descs != NULL);
515 *notified_descs = discarded_descs_no;
516 }
517}
518
519
520
521
522
523
524
525
526
527
528
529
530static int
531hw_poll_job_ring(struct sec_job_ring_t *job_ring,
532 struct rte_crypto_op **ops, int32_t limit,
533 struct caam_jr_qp *jr_qp)
534{
535 int32_t jobs_no_to_notify = 0;
536 int32_t number_of_jobs_available = 0;
537 int32_t notified_descs_no = 0;
538 uint32_t sec_error_code = 0;
539 struct job_descriptor *current_desc;
540 phys_addr_t current_desc_addr;
541 phys_addr_t *temp_addr;
542 struct caam_jr_op_ctx *ctx;
543
544
545
546
547
548 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
549 CAAM_JR_INFO("err received");
550 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
551 GET_JR_REG(JRINT, job_ring));
552 if (unlikely(sec_error_code)) {
553 hw_job_ring_error_print(job_ring, sec_error_code);
554 return -1;
555 }
556 }
557
558
559
560 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
561
562
563
564
565
566
567 jobs_no_to_notify = (limit > number_of_jobs_available) ?
568 number_of_jobs_available : limit;
569 CAAM_JR_DP_DEBUG(
570 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
571 job_ring, job_ring->pidx, job_ring->cidx,
572 limit, number_of_jobs_available, jobs_no_to_notify);
573
574 rte_smp_rmb();
575
576 while (jobs_no_to_notify > notified_descs_no) {
577 static uint64_t false_alarm;
578 static uint64_t real_poll;
579
580
581 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
582
583 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
584 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
585
586 real_poll++;
587
588 if (!current_desc_addr) {
589 false_alarm++;
590 printf("false alarm %" PRIu64 "real %" PRIu64
591 " sec_err =0x%x cidx Index =0%d\n",
592 false_alarm, real_poll,
593 sec_error_code, job_ring->cidx);
594 rte_panic("CAAM JR descriptor NULL");
595 return notified_descs_no;
596 }
597 current_desc = (struct job_descriptor *)
598 caam_jr_dma_ptov(current_desc_addr);
599
600
601
602 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
603 SEC_JOB_RING_SIZE);
604
605 hw_remove_entries(job_ring, 1);
606
607 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
608 if (unlikely(sec_error_code)) {
609 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
610 job_ring->cidx, sec_error_code);
611 hw_handle_job_ring_error(job_ring, sec_error_code);
612
613 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
614 jr_qp->rx_errs++;
615 } else {
616 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
617#if CAAM_JR_DBG
618 if (ctx->op->sym->m_dst) {
619 rte_hexdump(stdout, "PROCESSED",
620 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
621 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
622 } else {
623 rte_hexdump(stdout, "PROCESSED",
624 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
625 rte_pktmbuf_data_len(ctx->op->sym->m_src));
626 }
627#endif
628 }
629 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
630 struct ip *ip4_hdr;
631
632 if (ctx->op->sym->m_dst) {
633
634 ip4_hdr = (struct ip *)
635 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
636 ctx->op->sym->m_dst->pkt_len =
637 rte_be_to_cpu_16(ip4_hdr->ip_len);
638 ctx->op->sym->m_dst->data_len =
639 rte_be_to_cpu_16(ip4_hdr->ip_len);
640 } else {
641 ip4_hdr = (struct ip *)
642 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
643 ctx->op->sym->m_src->pkt_len =
644 rte_be_to_cpu_16(ip4_hdr->ip_len);
645 ctx->op->sym->m_src->data_len =
646 rte_be_to_cpu_16(ip4_hdr->ip_len);
647 }
648 }
649 *ops = ctx->op;
650 caam_jr_op_ending(ctx);
651 ops++;
652 notified_descs_no++;
653 }
654 return notified_descs_no;
655}
656
657static uint16_t
658caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
659 uint16_t nb_ops)
660{
661 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
662 struct sec_job_ring_t *ring = jr_qp->ring;
663 int num_rx;
664 int ret;
665
666 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
667
668
669
670
671
672
673
674 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
675 if (num_rx < 0) {
676 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
677 return 0;
678 }
679
680 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
681
682 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
683 if (num_rx < nb_ops) {
684 ret = caam_jr_enable_irqs(ring->irq_fd);
685 SEC_ASSERT(ret == 0, ret,
686 "Failed to enable irqs for job ring %p", ring);
687 }
688 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
689
690
691 ret = caam_jr_enable_irqs(ring->irq_fd);
692 SEC_ASSERT(ret == 0, ret,
693 "Failed to enable irqs for job ring %p", ring);
694 }
695
696 jr_qp->rx_pkts += num_rx;
697
698 return num_rx;
699}
700
701
702
703
704
705
706
707
708
709static inline struct caam_jr_op_ctx *
710build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
711{
712 struct rte_crypto_sym_op *sym = op->sym;
713 struct rte_mbuf *mbuf = sym->m_src;
714 struct caam_jr_op_ctx *ctx;
715 struct sec4_sg_entry *sg;
716 int length;
717 struct sec_cdb *cdb;
718 uint64_t sdesc_offset;
719 struct sec_job_descriptor_t *jobdescr;
720 uint8_t extra_segs;
721
722 if (is_decode(ses))
723 extra_segs = 2;
724 else
725 extra_segs = 1;
726
727 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
728 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
729 MAX_SG_ENTRIES);
730 return NULL;
731 }
732
733 ctx = caam_jr_alloc_ctx(ses);
734 if (!ctx)
735 return NULL;
736
737 ctx->op = op;
738
739 cdb = ses->cdb;
740 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
741
742 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
743
744 SEC_JD_INIT(jobdescr);
745 SEC_JD_SET_SD(jobdescr,
746 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
747 cdb->sh_hdr.hi.field.idlen);
748
749
750 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
751 0, ses->digest_length);
752
753
754 sg = &ctx->sg[0];
755 length = sym->auth.data.length;
756 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
757 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
758
759
760 mbuf = mbuf->next;
761 while (mbuf) {
762 sg++;
763 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
764 sg->len = cpu_to_caam32(mbuf->data_len);
765 mbuf = mbuf->next;
766 }
767
768 if (is_decode(ses)) {
769
770 sg++;
771
772 rte_memcpy(ctx->digest, sym->auth.digest.data,
773 ses->digest_length);
774#if CAAM_JR_DBG
775 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
776#endif
777 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
778 sg->len = cpu_to_caam32(ses->digest_length);
779 length += ses->digest_length;
780 } else {
781 sg->len -= ses->digest_length;
782 }
783
784
785 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
786
787 SEC_JD_SET_IN_PTR(jobdescr,
788 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
789
790 (jobdescr)->seq_in.command.word |= 0x01000000;
791
792 return ctx;
793}
794
795static inline struct caam_jr_op_ctx *
796build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
797{
798 struct rte_crypto_sym_op *sym = op->sym;
799 struct caam_jr_op_ctx *ctx;
800 struct sec4_sg_entry *sg;
801 rte_iova_t start_addr;
802 struct sec_cdb *cdb;
803 uint64_t sdesc_offset;
804 struct sec_job_descriptor_t *jobdescr;
805
806 ctx = caam_jr_alloc_ctx(ses);
807 if (!ctx)
808 return NULL;
809
810 ctx->op = op;
811
812 cdb = ses->cdb;
813 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
814
815 start_addr = rte_pktmbuf_iova(sym->m_src);
816
817 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
818
819 SEC_JD_INIT(jobdescr);
820 SEC_JD_SET_SD(jobdescr,
821 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
822 cdb->sh_hdr.hi.field.idlen);
823
824
825 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
826 0, ses->digest_length);
827
828
829 if (is_decode(ses)) {
830 sg = &ctx->sg[0];
831 SEC_JD_SET_IN_PTR(jobdescr,
832 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
833 (sym->auth.data.length + ses->digest_length));
834
835 (jobdescr)->seq_in.command.word |= 0x01000000;
836
837
838 rte_memcpy(ctx->digest, sym->auth.digest.data,
839 ses->digest_length);
840 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
841 sg->len = cpu_to_caam32(sym->auth.data.length);
842
843#if CAAM_JR_DBG
844 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
845#endif
846
847 sg++;
848 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
849 sg->len = cpu_to_caam32(ses->digest_length);
850
851 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
852 } else {
853 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
854 sym->auth.data.offset, sym->auth.data.length);
855 }
856 return ctx;
857}
858
859static inline struct caam_jr_op_ctx *
860build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
861{
862 struct rte_crypto_sym_op *sym = op->sym;
863 struct rte_mbuf *mbuf = sym->m_src;
864 struct caam_jr_op_ctx *ctx;
865 struct sec4_sg_entry *sg, *in_sg;
866 int length;
867 struct sec_cdb *cdb;
868 uint64_t sdesc_offset;
869 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
870 ses->iv.offset);
871 struct sec_job_descriptor_t *jobdescr;
872 uint8_t reg_segs;
873
874 if (sym->m_dst) {
875 mbuf = sym->m_dst;
876 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
877 } else {
878 mbuf = sym->m_src;
879 reg_segs = mbuf->nb_segs * 2 + 2;
880 }
881
882 if (reg_segs > MAX_SG_ENTRIES) {
883 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
884 MAX_SG_ENTRIES);
885 return NULL;
886 }
887
888 ctx = caam_jr_alloc_ctx(ses);
889 if (!ctx)
890 return NULL;
891
892 ctx->op = op;
893 cdb = ses->cdb;
894 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
895
896 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
897
898 SEC_JD_INIT(jobdescr);
899 SEC_JD_SET_SD(jobdescr,
900 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
901 cdb->sh_hdr.hi.field.idlen);
902
903#if CAAM_JR_DBG
904 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
905 sym->m_src->data_off, sym->cipher.data.offset,
906 sym->cipher.data.length, ses->iv.length);
907#endif
908
909 if (sym->m_dst)
910 mbuf = sym->m_dst;
911 else
912 mbuf = sym->m_src;
913
914 sg = &ctx->sg[0];
915 length = sym->cipher.data.length;
916
917 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
918 + sym->cipher.data.offset);
919 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
920
921
922 mbuf = mbuf->next;
923 while (mbuf) {
924 sg++;
925 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
926 sg->len = cpu_to_caam32(mbuf->data_len);
927 mbuf = mbuf->next;
928 }
929
930 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
931
932 SEC_JD_SET_OUT_PTR(jobdescr,
933 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
934 length);
935
936 (jobdescr)->seq_out.command.word |= 0x01000000;
937
938
939 sg++;
940 mbuf = sym->m_src;
941 in_sg = sg;
942
943 length = sym->cipher.data.length + ses->iv.length;
944
945
946 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
947 sg->len = cpu_to_caam32(ses->iv.length);
948
949
950 sg++;
951 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
952 + sym->cipher.data.offset);
953 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
954
955
956 mbuf = mbuf->next;
957 while (mbuf) {
958 sg++;
959 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
960 sg->len = cpu_to_caam32(mbuf->data_len);
961 mbuf = mbuf->next;
962 }
963
964 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
965
966
967 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
968 length);
969
970 (jobdescr)->seq_in.command.word |= 0x01000000;
971
972 return ctx;
973}
974
975static inline struct caam_jr_op_ctx *
976build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
977{
978 struct rte_crypto_sym_op *sym = op->sym;
979 struct caam_jr_op_ctx *ctx;
980 struct sec4_sg_entry *sg;
981 rte_iova_t src_start_addr, dst_start_addr;
982 struct sec_cdb *cdb;
983 uint64_t sdesc_offset;
984 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
985 ses->iv.offset);
986 struct sec_job_descriptor_t *jobdescr;
987
988 ctx = caam_jr_alloc_ctx(ses);
989 if (!ctx)
990 return NULL;
991
992 ctx->op = op;
993 cdb = ses->cdb;
994 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
995
996 src_start_addr = rte_pktmbuf_iova(sym->m_src);
997 if (sym->m_dst)
998 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
999 else
1000 dst_start_addr = src_start_addr;
1001
1002 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1003
1004 SEC_JD_INIT(jobdescr);
1005 SEC_JD_SET_SD(jobdescr,
1006 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1007 cdb->sh_hdr.hi.field.idlen);
1008
1009#if CAAM_JR_DBG
1010 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1011 sym->m_src->data_off, sym->cipher.data.offset,
1012 sym->cipher.data.length, ses->iv.length);
1013#endif
1014
1015 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1016 sym->cipher.data.offset,
1017 sym->cipher.data.length + ses->iv.length);
1018
1019
1020 sg = &ctx->sg[0];
1021 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1022 sym->cipher.data.length + ses->iv.length);
1023
1024 (jobdescr)->seq_in.command.word |= 0x01000000;
1025
1026 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1027 sg->len = cpu_to_caam32(ses->iv.length);
1028
1029 sg = &ctx->sg[1];
1030 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1031 sg->len = cpu_to_caam32(sym->cipher.data.length);
1032
1033 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1034
1035 return ctx;
1036}
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049static inline struct caam_jr_op_ctx *
1050build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1051{
1052 struct rte_crypto_sym_op *sym = op->sym;
1053 struct caam_jr_op_ctx *ctx;
1054 struct sec4_sg_entry *sg, *out_sg, *in_sg;
1055 struct rte_mbuf *mbuf;
1056 uint32_t length = 0;
1057 struct sec_cdb *cdb;
1058 uint64_t sdesc_offset;
1059 uint8_t req_segs;
1060 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1061 ses->iv.offset);
1062 struct sec_job_descriptor_t *jobdescr;
1063 uint16_t auth_hdr_len = sym->cipher.data.offset -
1064 sym->auth.data.offset;
1065 uint16_t auth_tail_len = sym->auth.data.length -
1066 sym->cipher.data.length - auth_hdr_len;
1067 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1068
1069 if (sym->m_dst) {
1070 mbuf = sym->m_dst;
1071 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1072 } else {
1073 mbuf = sym->m_src;
1074 req_segs = mbuf->nb_segs * 2 + 3;
1075 }
1076
1077 if (req_segs > MAX_SG_ENTRIES) {
1078 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1079 MAX_SG_ENTRIES);
1080 return NULL;
1081 }
1082
1083 ctx = caam_jr_alloc_ctx(ses);
1084 if (!ctx)
1085 return NULL;
1086
1087 ctx->op = op;
1088 cdb = ses->cdb;
1089 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1090
1091 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1092
1093 SEC_JD_INIT(jobdescr);
1094 SEC_JD_SET_SD(jobdescr,
1095 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1096 cdb->sh_hdr.hi.field.idlen);
1097
1098
1099 if (sym->m_dst)
1100 mbuf = sym->m_dst;
1101 else
1102 mbuf = sym->m_src;
1103
1104 out_sg = &ctx->sg[0];
1105 if (is_encode(ses))
1106 length = sym->auth.data.length + ses->digest_length;
1107 else
1108 length = sym->auth.data.length;
1109
1110 sg = &ctx->sg[0];
1111
1112
1113 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1114 + sym->auth.data.offset);
1115 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1116
1117
1118 mbuf = mbuf->next;
1119 while (mbuf) {
1120 sg++;
1121 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1122 sg->len = cpu_to_caam32(mbuf->data_len);
1123 mbuf = mbuf->next;
1124 }
1125
1126 if (is_encode(ses)) {
1127
1128 sg++;
1129 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1130 sg->len = cpu_to_caam32(ses->digest_length);
1131 }
1132
1133 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1134
1135 SEC_JD_SET_OUT_PTR(jobdescr,
1136 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1137
1138 (jobdescr)->seq_out.command.word |= 0x01000000;
1139
1140
1141 sg++;
1142 mbuf = sym->m_src;
1143 in_sg = sg;
1144 if (is_encode(ses))
1145 length = ses->iv.length + sym->auth.data.length;
1146 else
1147 length = ses->iv.length + sym->auth.data.length
1148 + ses->digest_length;
1149
1150 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1151 sg->len = cpu_to_caam32(ses->iv.length);
1152
1153 sg++;
1154
1155 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1156 + sym->auth.data.offset);
1157 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1158
1159
1160 mbuf = mbuf->next;
1161 while (mbuf) {
1162 sg++;
1163 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1164 sg->len = cpu_to_caam32(mbuf->data_len);
1165 mbuf = mbuf->next;
1166 }
1167
1168 if (is_decode(ses)) {
1169 sg++;
1170 rte_memcpy(ctx->digest, sym->auth.digest.data,
1171 ses->digest_length);
1172 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1173 sg->len = cpu_to_caam32(ses->digest_length);
1174 }
1175
1176 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1177
1178 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1179 length);
1180
1181 (jobdescr)->seq_in.command.word |= 0x01000000;
1182
1183
1184
1185
1186 if (auth_only_len)
1187
1188 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1189
1190 return ctx;
1191}
1192
1193static inline struct caam_jr_op_ctx *
1194build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1195{
1196 struct rte_crypto_sym_op *sym = op->sym;
1197 struct caam_jr_op_ctx *ctx;
1198 struct sec4_sg_entry *sg;
1199 rte_iova_t src_start_addr, dst_start_addr;
1200 uint32_t length = 0;
1201 struct sec_cdb *cdb;
1202 uint64_t sdesc_offset;
1203 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1204 ses->iv.offset);
1205 struct sec_job_descriptor_t *jobdescr;
1206 uint16_t auth_hdr_len = sym->cipher.data.offset -
1207 sym->auth.data.offset;
1208 uint16_t auth_tail_len = sym->auth.data.length -
1209 sym->cipher.data.length - auth_hdr_len;
1210 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1211
1212 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1213 if (sym->m_dst)
1214 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1215 else
1216 dst_start_addr = src_start_addr;
1217
1218 ctx = caam_jr_alloc_ctx(ses);
1219 if (!ctx)
1220 return NULL;
1221
1222 ctx->op = op;
1223 cdb = ses->cdb;
1224 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1225
1226 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1227
1228 SEC_JD_INIT(jobdescr);
1229 SEC_JD_SET_SD(jobdescr,
1230 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1231 cdb->sh_hdr.hi.field.idlen);
1232
1233
1234 sg = &ctx->sg[0];
1235 if (is_encode(ses)) {
1236 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1237 sg->len = cpu_to_caam32(ses->iv.length);
1238 length += ses->iv.length;
1239
1240 sg++;
1241 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1242 sg->len = cpu_to_caam32(sym->auth.data.length);
1243 length += sym->auth.data.length;
1244
1245 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1246 } else {
1247 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1248 sg->len = cpu_to_caam32(ses->iv.length);
1249 length += ses->iv.length;
1250
1251 sg++;
1252 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1253 sg->len = cpu_to_caam32(sym->auth.data.length);
1254 length += sym->auth.data.length;
1255
1256 rte_memcpy(ctx->digest, sym->auth.digest.data,
1257 ses->digest_length);
1258 sg++;
1259 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1260 sg->len = cpu_to_caam32(ses->digest_length);
1261 length += ses->digest_length;
1262
1263 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1264 }
1265
1266 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1267 length);
1268
1269 (jobdescr)->seq_in.command.word |= 0x01000000;
1270
1271
1272 sg = &ctx->sg[6];
1273
1274 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1275 sg->len = cpu_to_caam32(sym->cipher.data.length);
1276 length = sym->cipher.data.length;
1277
1278 if (is_encode(ses)) {
1279
1280 sg++;
1281 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1282 sg->len = cpu_to_caam32(ses->digest_length);
1283 length += ses->digest_length;
1284 }
1285
1286 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1287
1288 SEC_JD_SET_OUT_PTR(jobdescr,
1289 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1290
1291 (jobdescr)->seq_out.command.word |= 0x01000000;
1292
1293
1294
1295
1296
1297 if (auth_only_len)
1298
1299 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1300
1301 return ctx;
1302}
1303
1304static inline struct caam_jr_op_ctx *
1305build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1306{
1307 struct rte_crypto_sym_op *sym = op->sym;
1308 struct caam_jr_op_ctx *ctx = NULL;
1309 phys_addr_t src_start_addr, dst_start_addr;
1310 struct sec_cdb *cdb;
1311 uint64_t sdesc_offset;
1312 struct sec_job_descriptor_t *jobdescr;
1313
1314 ctx = caam_jr_alloc_ctx(ses);
1315 if (!ctx)
1316 return NULL;
1317 ctx->op = op;
1318
1319 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1320 if (sym->m_dst)
1321 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1322 else
1323 dst_start_addr = src_start_addr;
1324
1325 cdb = ses->cdb;
1326 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1327
1328 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1329
1330 SEC_JD_INIT(jobdescr);
1331 SEC_JD_SET_SD(jobdescr,
1332 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1333 cdb->sh_hdr.hi.field.idlen);
1334
1335
1336 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1337 sym->m_src->buf_len - sym->m_src->data_off);
1338
1339 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1340 sym->m_src->pkt_len);
1341 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1342
1343 return ctx;
1344}
1345
1346static int
1347caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1348{
1349 struct sec_job_ring_t *ring = qp->ring;
1350 struct caam_jr_session *ses;
1351 struct caam_jr_op_ctx *ctx = NULL;
1352 struct sec_job_descriptor_t *jobdescr __rte_unused;
1353#if CAAM_JR_DBG
1354 int i;
1355#endif
1356
1357 switch (op->sess_type) {
1358 case RTE_CRYPTO_OP_WITH_SESSION:
1359 ses = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1360 break;
1361 case RTE_CRYPTO_OP_SECURITY_SESSION:
1362 ses = SECURITY_GET_SESS_PRIV(op->sym->session);
1363 break;
1364 default:
1365 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1366 qp->tx_errs++;
1367 return -1;
1368 }
1369
1370 if (unlikely(!ses->qp || ses->qp != qp)) {
1371 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1372 ses->qp = qp;
1373 caam_jr_prep_cdb(ses);
1374 }
1375
1376 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1377 if (is_auth_cipher(ses))
1378 ctx = build_cipher_auth(op, ses);
1379 else if (is_aead(ses))
1380 goto err1;
1381 else if (is_auth_only(ses))
1382 ctx = build_auth_only(op, ses);
1383 else if (is_cipher_only(ses))
1384 ctx = build_cipher_only(op, ses);
1385 else if (is_proto_ipsec(ses))
1386 ctx = build_proto(op, ses);
1387 } else {
1388 if (is_auth_cipher(ses))
1389 ctx = build_cipher_auth_sg(op, ses);
1390 else if (is_aead(ses))
1391 goto err1;
1392 else if (is_auth_only(ses))
1393 ctx = build_auth_only_sg(op, ses);
1394 else if (is_cipher_only(ses))
1395 ctx = build_cipher_only_sg(op, ses);
1396 }
1397err1:
1398 if (unlikely(!ctx)) {
1399 qp->tx_errs++;
1400 CAAM_JR_ERR("not supported sec op");
1401 return -1;
1402 }
1403#if CAAM_JR_DBG
1404 if (is_decode(ses))
1405 rte_hexdump(stdout, "DECODE",
1406 rte_pktmbuf_mtod(op->sym->m_src, void *),
1407 rte_pktmbuf_data_len(op->sym->m_src));
1408 else
1409 rte_hexdump(stdout, "ENCODE",
1410 rte_pktmbuf_mtod(op->sym->m_src, void *),
1411 rte_pktmbuf_data_len(op->sym->m_src));
1412
1413 printf("\n JD before conversion\n");
1414 for (i = 0; i < 12; i++)
1415 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1416#endif
1417
1418 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1419 ring, ring->pidx, ring->cidx);
1420
1421
1422 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1423 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1424 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1425 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1426 caam_jr_op_ending(ctx);
1427 qp->tx_ring_full++;
1428 return -EBUSY;
1429 }
1430
1431#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1432 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1433
1434 jobdescr->deschdr.command.word =
1435 cpu_to_caam32(jobdescr->deschdr.command.word);
1436 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1437 jobdescr->seq_out.command.word =
1438 cpu_to_caam32(jobdescr->seq_out.command.word);
1439 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1440 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1441 jobdescr->seq_in.command.word =
1442 cpu_to_caam32(jobdescr->seq_in.command.word);
1443 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1444 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1445 jobdescr->load_dpovrd.command.word =
1446 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1447 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1448#endif
1449
1450
1451 sec_write_addr(&ring->input_ring[ring->pidx],
1452 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1453 rte_smp_wmb();
1454
1455
1456 hw_enqueue_desc_on_job_ring(ring);
1457
1458
1459 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1460
1461 return 0;
1462}
1463
1464static uint16_t
1465caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1466 uint16_t nb_ops)
1467{
1468
1469 uint32_t loop;
1470 int32_t ret;
1471 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1472 uint16_t num_tx = 0;
1473
1474 for (loop = 0; loop < nb_ops; loop++) {
1475 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1476 if (!ret)
1477 num_tx++;
1478 }
1479
1480 jr_qp->tx_pkts += num_tx;
1481
1482 return num_tx;
1483}
1484
1485
1486static int
1487caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1488 uint16_t qp_id)
1489{
1490 struct sec_job_ring_t *internals;
1491 struct caam_jr_qp *qp = NULL;
1492
1493 PMD_INIT_FUNC_TRACE();
1494 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1495
1496 internals = dev->data->dev_private;
1497 if (qp_id >= internals->max_nb_queue_pairs) {
1498 CAAM_JR_ERR("Max supported qpid %d",
1499 internals->max_nb_queue_pairs);
1500 return -EINVAL;
1501 }
1502
1503 qp = &internals->qps[qp_id];
1504 qp->ring = NULL;
1505 dev->data->queue_pairs[qp_id] = NULL;
1506
1507 return 0;
1508}
1509
1510
1511static int
1512caam_jr_queue_pair_setup(
1513 struct rte_cryptodev *dev, uint16_t qp_id,
1514 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1515 __rte_unused int socket_id)
1516{
1517 struct sec_job_ring_t *internals;
1518 struct caam_jr_qp *qp = NULL;
1519
1520 PMD_INIT_FUNC_TRACE();
1521 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1522
1523 internals = dev->data->dev_private;
1524 if (qp_id >= internals->max_nb_queue_pairs) {
1525 CAAM_JR_ERR("Max supported qpid %d",
1526 internals->max_nb_queue_pairs);
1527 return -EINVAL;
1528 }
1529
1530 qp = &internals->qps[qp_id];
1531 qp->ring = internals;
1532 dev->data->queue_pairs[qp_id] = qp;
1533
1534 return 0;
1535}
1536
1537
1538static unsigned int
1539caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1540{
1541 PMD_INIT_FUNC_TRACE();
1542
1543 return sizeof(struct caam_jr_session);
1544}
1545
1546static int
1547caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1548 struct rte_crypto_sym_xform *xform,
1549 struct caam_jr_session *session)
1550{
1551 session->cipher_alg = xform->cipher.algo;
1552 session->iv.length = xform->cipher.iv.length;
1553 session->iv.offset = xform->cipher.iv.offset;
1554 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1555 RTE_CACHE_LINE_SIZE);
1556 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1557 CAAM_JR_ERR("No Memory for cipher key\n");
1558 return -ENOMEM;
1559 }
1560 session->cipher_key.length = xform->cipher.key.length;
1561
1562 memcpy(session->cipher_key.data, xform->cipher.key.data,
1563 xform->cipher.key.length);
1564 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1565 DIR_ENC : DIR_DEC;
1566
1567 return 0;
1568}
1569
1570static int
1571caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1572 struct rte_crypto_sym_xform *xform,
1573 struct caam_jr_session *session)
1574{
1575 session->auth_alg = xform->auth.algo;
1576 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1577 RTE_CACHE_LINE_SIZE);
1578 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1579 CAAM_JR_ERR("No Memory for auth key\n");
1580 return -ENOMEM;
1581 }
1582 session->auth_key.length = xform->auth.key.length;
1583 session->digest_length = xform->auth.digest_length;
1584
1585 memcpy(session->auth_key.data, xform->auth.key.data,
1586 xform->auth.key.length);
1587 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1588 DIR_ENC : DIR_DEC;
1589
1590 return 0;
1591}
1592
1593static int
1594caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1595 struct rte_crypto_sym_xform *xform,
1596 struct caam_jr_session *session)
1597{
1598 session->aead_alg = xform->aead.algo;
1599 session->iv.length = xform->aead.iv.length;
1600 session->iv.offset = xform->aead.iv.offset;
1601 session->auth_only_len = xform->aead.aad_length;
1602 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1603 RTE_CACHE_LINE_SIZE);
1604 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1605 CAAM_JR_ERR("No Memory for aead key\n");
1606 return -ENOMEM;
1607 }
1608 session->aead_key.length = xform->aead.key.length;
1609 session->digest_length = xform->aead.digest_length;
1610
1611 memcpy(session->aead_key.data, xform->aead.key.data,
1612 xform->aead.key.length);
1613 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1614 DIR_ENC : DIR_DEC;
1615
1616 return 0;
1617}
1618
1619static int
1620caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1621 struct rte_crypto_sym_xform *xform, void *sess)
1622{
1623 struct sec_job_ring_t *internals = dev->data->dev_private;
1624 struct caam_jr_session *session = sess;
1625
1626 PMD_INIT_FUNC_TRACE();
1627
1628 if (unlikely(sess == NULL)) {
1629 CAAM_JR_ERR("invalid session struct");
1630 return -EINVAL;
1631 }
1632
1633
1634 session->iv.length = 0;
1635
1636
1637 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1638 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1639 caam_jr_cipher_init(dev, xform, session);
1640
1641
1642 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1643 xform->next == NULL) {
1644 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1645 caam_jr_auth_init(dev, xform, session);
1646
1647
1648 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1649 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1650 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1651 caam_jr_cipher_init(dev, xform, session);
1652 caam_jr_auth_init(dev, xform->next, session);
1653 } else {
1654 CAAM_JR_ERR("Not supported: Auth then Cipher");
1655 goto err1;
1656 }
1657
1658
1659 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1660 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1661 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1662 caam_jr_auth_init(dev, xform, session);
1663 caam_jr_cipher_init(dev, xform->next, session);
1664 } else {
1665 CAAM_JR_ERR("Not supported: Auth then Cipher");
1666 goto err1;
1667 }
1668
1669
1670 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1671 xform->next == NULL) {
1672 caam_jr_aead_init(dev, xform, session);
1673
1674 } else {
1675 CAAM_JR_ERR("Invalid crypto type");
1676 return -EINVAL;
1677 }
1678 session->ctx_pool = internals->ctx_pool;
1679
1680 return 0;
1681
1682err1:
1683 rte_free(session->cipher_key.data);
1684 rte_free(session->auth_key.data);
1685 memset(session, 0, sizeof(struct caam_jr_session));
1686
1687 return -EINVAL;
1688}
1689
1690static int
1691caam_jr_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
1692 struct rte_crypto_sym_xform *xform,
1693 struct rte_cryptodev_sym_session *sess)
1694{
1695 void *sess_private_data;
1696 int ret;
1697
1698 PMD_INIT_FUNC_TRACE();
1699 sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
1700 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1701 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1702 if (ret != 0) {
1703 CAAM_JR_ERR("failed to configure session parameters");
1704
1705 return ret;
1706 }
1707
1708 return 0;
1709}
1710
1711
1712static void
1713caam_jr_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
1714 struct rte_cryptodev_sym_session *sess)
1715{
1716 struct caam_jr_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
1717
1718 PMD_INIT_FUNC_TRACE();
1719
1720 if (s) {
1721 rte_free(s->cipher_key.data);
1722 rte_free(s->auth_key.data);
1723 }
1724}
1725
1726static int
1727caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1728 struct rte_security_session_conf *conf,
1729 void *sess)
1730{
1731 struct sec_job_ring_t *internals = dev->data->dev_private;
1732 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1733 struct rte_crypto_auth_xform *auth_xform;
1734 struct rte_crypto_cipher_xform *cipher_xform;
1735 struct caam_jr_session *session = (struct caam_jr_session *)sess;
1736
1737 PMD_INIT_FUNC_TRACE();
1738
1739 if (ipsec_xform->life.bytes_hard_limit != 0 ||
1740 ipsec_xform->life.bytes_soft_limit != 0 ||
1741 ipsec_xform->life.packets_hard_limit != 0 ||
1742 ipsec_xform->life.packets_soft_limit != 0)
1743 return -ENOTSUP;
1744
1745 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1746 cipher_xform = &conf->crypto_xform->cipher;
1747 auth_xform = &conf->crypto_xform->next->auth;
1748 } else {
1749 auth_xform = &conf->crypto_xform->auth;
1750 cipher_xform = &conf->crypto_xform->next->cipher;
1751 }
1752 session->proto_alg = conf->protocol;
1753 session->cipher_key.data = rte_zmalloc(NULL,
1754 cipher_xform->key.length,
1755 RTE_CACHE_LINE_SIZE);
1756 if (session->cipher_key.data == NULL &&
1757 cipher_xform->key.length > 0) {
1758 CAAM_JR_ERR("No Memory for cipher key\n");
1759 return -ENOMEM;
1760 }
1761
1762 session->cipher_key.length = cipher_xform->key.length;
1763 session->auth_key.data = rte_zmalloc(NULL,
1764 auth_xform->key.length,
1765 RTE_CACHE_LINE_SIZE);
1766 if (session->auth_key.data == NULL &&
1767 auth_xform->key.length > 0) {
1768 CAAM_JR_ERR("No Memory for auth key\n");
1769 rte_free(session->cipher_key.data);
1770 return -ENOMEM;
1771 }
1772 session->auth_key.length = auth_xform->key.length;
1773 memcpy(session->cipher_key.data, cipher_xform->key.data,
1774 cipher_xform->key.length);
1775 memcpy(session->auth_key.data, auth_xform->key.data,
1776 auth_xform->key.length);
1777
1778 switch (auth_xform->algo) {
1779 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1780 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1781 break;
1782 case RTE_CRYPTO_AUTH_MD5_HMAC:
1783 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1784 break;
1785 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1786 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1787 break;
1788 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1789 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1790 break;
1791 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1792 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1793 break;
1794 case RTE_CRYPTO_AUTH_AES_CMAC:
1795 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1796 break;
1797 case RTE_CRYPTO_AUTH_NULL:
1798 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1799 break;
1800 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1801 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1802 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1803 case RTE_CRYPTO_AUTH_SHA1:
1804 case RTE_CRYPTO_AUTH_SHA256:
1805 case RTE_CRYPTO_AUTH_SHA512:
1806 case RTE_CRYPTO_AUTH_SHA224:
1807 case RTE_CRYPTO_AUTH_SHA384:
1808 case RTE_CRYPTO_AUTH_MD5:
1809 case RTE_CRYPTO_AUTH_AES_GMAC:
1810 case RTE_CRYPTO_AUTH_KASUMI_F9:
1811 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1812 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1813 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1814 auth_xform->algo);
1815 goto out;
1816 default:
1817 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1818 auth_xform->algo);
1819 goto out;
1820 }
1821
1822 switch (cipher_xform->algo) {
1823 case RTE_CRYPTO_CIPHER_AES_CBC:
1824 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1825 break;
1826 case RTE_CRYPTO_CIPHER_3DES_CBC:
1827 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1828 break;
1829 case RTE_CRYPTO_CIPHER_AES_CTR:
1830 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1831 break;
1832 case RTE_CRYPTO_CIPHER_NULL:
1833 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1834 case RTE_CRYPTO_CIPHER_3DES_ECB:
1835 case RTE_CRYPTO_CIPHER_AES_ECB:
1836 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1837 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1838 cipher_xform->algo);
1839 goto out;
1840 default:
1841 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1842 cipher_xform->algo);
1843 goto out;
1844 }
1845
1846 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1847 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1848 sizeof(session->ip4_hdr));
1849 session->ip4_hdr.ip_v = IPVERSION;
1850 session->ip4_hdr.ip_hl = 5;
1851 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1852 sizeof(session->ip4_hdr));
1853 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1854 session->ip4_hdr.ip_id = 0;
1855 session->ip4_hdr.ip_off = 0;
1856 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1857 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1858 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1859 : IPPROTO_AH;
1860 session->ip4_hdr.ip_sum = 0;
1861 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1862 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1863 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1864 (void *)&session->ip4_hdr,
1865 sizeof(struct ip));
1866
1867 session->encap_pdb.options =
1868 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1869 PDBOPTS_ESP_OIHI_PDB_INL |
1870 PDBOPTS_ESP_IVSRC;
1871 if (ipsec_xform->options.dec_ttl)
1872 session->encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
1873 if (ipsec_xform->options.esn)
1874 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1875 session->encap_pdb.spi = ipsec_xform->spi;
1876 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1877
1878 session->dir = DIR_ENC;
1879 } else if (ipsec_xform->direction ==
1880 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1881 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
1882 session->decap_pdb.options = sizeof(struct ip) << 16;
1883 if (ipsec_xform->options.esn)
1884 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1885 session->dir = DIR_DEC;
1886 } else
1887 goto out;
1888 session->ctx_pool = internals->ctx_pool;
1889
1890 return 0;
1891out:
1892 rte_free(session->auth_key.data);
1893 rte_free(session->cipher_key.data);
1894 memset(session, 0, sizeof(struct caam_jr_session));
1895 return -1;
1896}
1897
1898static int
1899caam_jr_security_session_create(void *dev,
1900 struct rte_security_session_conf *conf,
1901 struct rte_security_session *sess)
1902{
1903 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
1904 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1905 int ret;
1906
1907 switch (conf->protocol) {
1908 case RTE_SECURITY_PROTOCOL_IPSEC:
1909 ret = caam_jr_set_ipsec_session(cdev, conf,
1910 sess_private_data);
1911 break;
1912 case RTE_SECURITY_PROTOCOL_MACSEC:
1913 return -ENOTSUP;
1914 default:
1915 return -EINVAL;
1916 }
1917 if (ret != 0) {
1918 CAAM_JR_ERR("failed to configure session parameters");
1919 }
1920
1921 return ret;
1922}
1923
1924
1925static int
1926caam_jr_security_session_destroy(void *dev __rte_unused,
1927 struct rte_security_session *sess)
1928{
1929 PMD_INIT_FUNC_TRACE();
1930 struct caam_jr_session *s = SECURITY_GET_SESS_PRIV(sess);
1931
1932 if (s) {
1933 rte_free(s->cipher_key.data);
1934 rte_free(s->auth_key.data);
1935 memset(s, 0, sizeof(struct caam_jr_session));
1936 }
1937 return 0;
1938}
1939
1940static unsigned int
1941caam_jr_security_session_get_size(void *device __rte_unused)
1942{
1943 return sizeof(struct caam_jr_session);
1944}
1945
1946static int
1947caam_jr_dev_configure(struct rte_cryptodev *dev,
1948 struct rte_cryptodev_config *config __rte_unused)
1949{
1950 char str[20];
1951 struct sec_job_ring_t *internals;
1952
1953 PMD_INIT_FUNC_TRACE();
1954
1955 internals = dev->data->dev_private;
1956 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1957 if (!internals->ctx_pool) {
1958 internals->ctx_pool = rte_mempool_create((const char *)str,
1959 CTX_POOL_NUM_BUFS,
1960 sizeof(struct caam_jr_op_ctx),
1961 CTX_POOL_CACHE_SIZE, 0,
1962 NULL, NULL, NULL, NULL,
1963 SOCKET_ID_ANY, 0);
1964 if (!internals->ctx_pool) {
1965 CAAM_JR_ERR("%s create failed\n", str);
1966 return -ENOMEM;
1967 }
1968 } else
1969 CAAM_JR_INFO("mempool already created for dev_id : %d",
1970 dev->data->dev_id);
1971
1972 return 0;
1973}
1974
1975static int
1976caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
1977{
1978 PMD_INIT_FUNC_TRACE();
1979 return 0;
1980}
1981
1982static void
1983caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
1984{
1985 PMD_INIT_FUNC_TRACE();
1986}
1987
1988static int
1989caam_jr_dev_close(struct rte_cryptodev *dev)
1990{
1991 struct sec_job_ring_t *internals;
1992
1993 PMD_INIT_FUNC_TRACE();
1994
1995 if (dev == NULL)
1996 return -ENOMEM;
1997
1998 internals = dev->data->dev_private;
1999 rte_mempool_free(internals->ctx_pool);
2000 internals->ctx_pool = NULL;
2001
2002 return 0;
2003}
2004
2005static void
2006caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2007 struct rte_cryptodev_info *info)
2008{
2009 struct sec_job_ring_t *internals = dev->data->dev_private;
2010
2011 PMD_INIT_FUNC_TRACE();
2012 if (info != NULL) {
2013 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2014 info->feature_flags = dev->feature_flags;
2015 info->capabilities = caam_jr_get_cryptodev_capabilities();
2016 info->sym.max_nb_sessions = internals->max_nb_sessions;
2017 info->driver_id = cryptodev_driver_id;
2018 }
2019}
2020
2021static struct rte_cryptodev_ops caam_jr_ops = {
2022 .dev_configure = caam_jr_dev_configure,
2023 .dev_start = caam_jr_dev_start,
2024 .dev_stop = caam_jr_dev_stop,
2025 .dev_close = caam_jr_dev_close,
2026 .dev_infos_get = caam_jr_dev_infos_get,
2027 .stats_get = caam_jr_stats_get,
2028 .stats_reset = caam_jr_stats_reset,
2029 .queue_pair_setup = caam_jr_queue_pair_setup,
2030 .queue_pair_release = caam_jr_queue_pair_release,
2031 .sym_session_get_size = caam_jr_sym_session_get_size,
2032 .sym_session_configure = caam_jr_sym_session_configure,
2033 .sym_session_clear = caam_jr_sym_session_clear
2034};
2035
2036static struct rte_security_ops caam_jr_security_ops = {
2037 .session_create = caam_jr_security_session_create,
2038 .session_update = NULL,
2039 .session_get_size = caam_jr_security_session_get_size,
2040 .session_stats_get = NULL,
2041 .session_destroy = caam_jr_security_session_destroy,
2042 .set_pkt_metadata = NULL,
2043 .capabilities_get = caam_jr_get_security_capabilities
2044};
2045
2046
2047
2048
2049
2050static void
2051close_job_ring(struct sec_job_ring_t *job_ring)
2052{
2053 if (job_ring->irq_fd != -1) {
2054
2055
2056
2057 while (job_ring->pidx != job_ring->cidx)
2058 hw_flush_job_ring(job_ring, false, NULL);
2059
2060
2061 free_job_ring(job_ring->irq_fd);
2062 job_ring->irq_fd = -1;
2063 caam_jr_dma_free(job_ring->input_ring);
2064 caam_jr_dma_free(job_ring->output_ring);
2065 g_job_rings_no--;
2066 }
2067}
2068
2069
2070
2071
2072
2073
2074
2075static int
2076shutdown_job_ring(struct sec_job_ring_t *job_ring)
2077{
2078 int ret = 0;
2079
2080 PMD_INIT_FUNC_TRACE();
2081 ASSERT(job_ring != NULL);
2082 ret = hw_shutdown_job_ring(job_ring);
2083 SEC_ASSERT(ret == 0, ret,
2084 "Failed to shutdown hardware job ring %p",
2085 job_ring);
2086
2087 if (job_ring->coalescing_en)
2088 hw_job_ring_disable_coalescing(job_ring);
2089
2090 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2091 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2092 SEC_ASSERT(ret == 0, ret,
2093 "Failed to disable irqs for job ring %p",
2094 job_ring);
2095 }
2096
2097 return ret;
2098}
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114static int
2115caam_jr_dev_uninit(struct rte_cryptodev *dev)
2116{
2117 struct sec_job_ring_t *internals;
2118
2119 PMD_INIT_FUNC_TRACE();
2120 if (dev == NULL)
2121 return -ENODEV;
2122
2123 internals = dev->data->dev_private;
2124 rte_free(dev->security_ctx);
2125
2126
2127
2128
2129 if (internals) {
2130 shutdown_job_ring(internals);
2131 close_job_ring(internals);
2132 rte_mempool_free(internals->ctx_pool);
2133 }
2134
2135 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2136
2137
2138 if (g_job_rings_no == 0)
2139 g_driver_state = SEC_DRIVER_STATE_IDLE;
2140
2141 return SEC_SUCCESS;
2142}
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165static void *
2166init_job_ring(void *reg_base_addr, int irq_id)
2167{
2168 struct sec_job_ring_t *job_ring = NULL;
2169 int i, ret = 0;
2170 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2171 int napi_mode = 0;
2172 int irq_coalescing_timer = 0;
2173 int irq_coalescing_count = 0;
2174
2175 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2176 if (g_job_rings[i].irq_fd == -1) {
2177 job_ring = &g_job_rings[i];
2178 g_job_rings_no++;
2179 break;
2180 }
2181 }
2182 if (job_ring == NULL) {
2183 CAAM_JR_ERR("No free job ring\n");
2184 return NULL;
2185 }
2186
2187 job_ring->register_base_addr = reg_base_addr;
2188 job_ring->jr_mode = jr_mode;
2189 job_ring->napi_mode = 0;
2190 job_ring->irq_fd = irq_id;
2191
2192
2193
2194
2195 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2196 SEC_DMA_MEM_INPUT_RING_SIZE);
2197 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2198
2199
2200 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2201 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2202 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2203
2204
2205 ret = hw_reset_job_ring(job_ring);
2206 if (ret != 0) {
2207 CAAM_JR_ERR("Failed to reset hardware job ring");
2208 goto cleanup;
2209 }
2210
2211 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2212
2213
2214
2215 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2216 CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2217 job_ring);
2218 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2219 if (ret != 0) {
2220 CAAM_JR_ERR("Failed to enable irqs for job ring");
2221 goto cleanup;
2222 }
2223 }
2224 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2225
2226
2227
2228 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2229 job_ring);
2230 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2231 if (ret != 0) {
2232 CAAM_JR_ERR("Failed to enable irqs for job ring");
2233 goto cleanup;
2234 }
2235 }
2236 if (irq_coalescing_timer || irq_coalescing_count) {
2237 hw_job_ring_set_coalescing_param(job_ring,
2238 irq_coalescing_timer,
2239 irq_coalescing_count);
2240
2241 hw_job_ring_enable_coalescing(job_ring);
2242 job_ring->coalescing_en = 1;
2243 }
2244
2245 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2246 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2247 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2248
2249 return job_ring;
2250cleanup:
2251 caam_jr_dma_free(job_ring->output_ring);
2252 caam_jr_dma_free(job_ring->input_ring);
2253 return NULL;
2254}
2255
2256
2257static int
2258caam_jr_dev_init(const char *name,
2259 struct rte_vdev_device *vdev,
2260 struct rte_cryptodev_pmd_init_params *init_params)
2261{
2262 struct rte_cryptodev *dev;
2263 struct rte_security_ctx *security_instance;
2264 struct uio_job_ring *job_ring;
2265 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2266
2267 PMD_INIT_FUNC_TRACE();
2268
2269
2270 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2271 g_job_rings_max = sec_configure();
2272 if (!g_job_rings_max) {
2273 CAAM_JR_ERR("No job ring detected on UIO !!!!");
2274 return -1;
2275 }
2276
2277 g_driver_state = SEC_DRIVER_STATE_STARTED;
2278 }
2279
2280 if (g_job_rings_no >= g_job_rings_max) {
2281 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2282 g_job_rings_max);
2283 return -1;
2284 }
2285
2286 job_ring = config_job_ring();
2287 if (job_ring == NULL) {
2288 CAAM_JR_ERR("failed to create job ring");
2289 goto init_error;
2290 }
2291
2292 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2293
2294 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2295 if (dev == NULL) {
2296 CAAM_JR_ERR("failed to create cryptodev vdev");
2297 goto cleanup;
2298 }
2299
2300 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2301 job_ring->uio_fd);
2302
2303 if (!dev->data->dev_private) {
2304 CAAM_JR_ERR("Ring memory allocation failed\n");
2305 goto cleanup2;
2306 }
2307
2308 dev->driver_id = cryptodev_driver_id;
2309 dev->dev_ops = &caam_jr_ops;
2310
2311
2312 dev->dequeue_burst = caam_jr_dequeue_burst;
2313 dev->enqueue_burst = caam_jr_enqueue_burst;
2314 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2315 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2316 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2317 RTE_CRYPTODEV_FF_SECURITY |
2318 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2319 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2320 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2321 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2322 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2323
2324
2325
2326
2327
2328 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2329 CAAM_JR_WARN("Device already init by primary process");
2330 return 0;
2331 }
2332
2333
2334 security_instance = rte_malloc("caam_jr",
2335 sizeof(struct rte_security_ctx), 0);
2336 if (security_instance == NULL) {
2337 CAAM_JR_ERR("memory allocation failed\n");
2338
2339 goto cleanup2;
2340 }
2341
2342 security_instance->device = (void *)dev;
2343 security_instance->ops = &caam_jr_security_ops;
2344 security_instance->sess_cnt = 0;
2345 dev->security_ctx = security_instance;
2346
2347 rte_cryptodev_pmd_probing_finish(dev);
2348
2349 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2350
2351 return 0;
2352
2353cleanup2:
2354 caam_jr_dev_uninit(dev);
2355 rte_cryptodev_pmd_release_device(dev);
2356cleanup:
2357 free_job_ring(job_ring->uio_fd);
2358init_error:
2359 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2360 init_params->name);
2361
2362 return -ENXIO;
2363}
2364
2365
2366static int
2367cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2368{
2369 int ret;
2370
2371 struct rte_cryptodev_pmd_init_params init_params = {
2372 "",
2373 sizeof(struct sec_job_ring_t),
2374 rte_socket_id(),
2375 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2376 };
2377 const char *name;
2378 const char *input_args;
2379
2380 name = rte_vdev_device_name(vdev);
2381 if (name == NULL)
2382 return -EINVAL;
2383
2384 input_args = rte_vdev_device_args(vdev);
2385 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2386
2387 ret = of_init();
2388 if (ret) {
2389 RTE_LOG(ERR, PMD,
2390 "of_init failed\n");
2391 return -EINVAL;
2392 }
2393
2394 if (!rta_get_sec_era()) {
2395 const struct device_node *caam_node;
2396
2397 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2398 const uint32_t *prop = of_get_property(caam_node,
2399 "fsl,sec-era",
2400 NULL);
2401 if (prop) {
2402 rta_set_sec_era(
2403 INTL_SEC_ERA(rte_be_to_cpu_32(*prop)));
2404 break;
2405 }
2406 }
2407 }
2408#ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2409 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2410 RTE_LOG(ERR, PMD,
2411 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2412 return -EINVAL;
2413 }
2414#endif
2415
2416 return caam_jr_dev_init(name, vdev, &init_params);
2417}
2418
2419
2420static int
2421cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2422{
2423 struct rte_cryptodev *cryptodev;
2424 const char *name;
2425
2426 name = rte_vdev_device_name(vdev);
2427 if (name == NULL)
2428 return -EINVAL;
2429
2430 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2431 if (cryptodev == NULL)
2432 return -ENODEV;
2433
2434 caam_jr_dev_uninit(cryptodev);
2435
2436 return rte_cryptodev_pmd_destroy(cryptodev);
2437}
2438
2439static void
2440sec_job_rings_init(void)
2441{
2442 int i;
2443
2444 for (i = 0; i < MAX_SEC_JOB_RINGS; i++)
2445 g_job_rings[i].irq_fd = -1;
2446}
2447
2448static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2449 .probe = cryptodev_caam_jr_probe,
2450 .remove = cryptodev_caam_jr_remove
2451};
2452
2453static struct cryptodev_driver caam_jr_crypto_drv;
2454
2455RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2456RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2457 "max_nb_queue_pairs=<int>"
2458 "socket_id=<int>");
2459RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2460 cryptodev_driver_id);
2461
2462RTE_INIT(caam_jr_init)
2463{
2464 sec_uio_job_rings_init();
2465 sec_job_rings_init();
2466}
2467
2468RTE_LOG_REGISTER(caam_jr_logtype, pmd.crypto.caam, NOTICE);
2469