1
2
3
4
5#include "pmd_snow3g_priv.h"
6
7
8static int
9snow3g_session_configure(IMB_MGR *mgr, void *priv_sess,
10 const struct rte_crypto_sym_xform *xform)
11{
12 struct snow3g_session *sess = (struct snow3g_session *)priv_sess;
13 const struct rte_crypto_sym_xform *auth_xform = NULL;
14 const struct rte_crypto_sym_xform *cipher_xform = NULL;
15 enum ipsec_mb_operation mode;
16
17
18 int ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
19 &cipher_xform, NULL);
20 if (ret)
21 return ret;
22
23 if (cipher_xform) {
24
25 if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
26 return -ENOTSUP;
27
28 if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
29 IPSEC_MB_LOG(ERR, "Wrong IV length");
30 return -EINVAL;
31 }
32 if (cipher_xform->cipher.key.length > SNOW3G_MAX_KEY_SIZE) {
33 IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
34 return -ENOMEM;
35 }
36
37 sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
38
39
40 IMB_SNOW3G_INIT_KEY_SCHED(mgr, cipher_xform->cipher.key.data,
41 &sess->pKeySched_cipher);
42 }
43
44 if (auth_xform) {
45
46 if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
47 return -ENOTSUP;
48
49 if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
50 IPSEC_MB_LOG(ERR, "Wrong digest length");
51 return -EINVAL;
52 }
53 if (auth_xform->auth.key.length > SNOW3G_MAX_KEY_SIZE) {
54 IPSEC_MB_LOG(ERR, "Not enough memory to store the key");
55 return -ENOMEM;
56 }
57
58 sess->auth_op = auth_xform->auth.op;
59
60 if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
61 IPSEC_MB_LOG(ERR, "Wrong IV length");
62 return -EINVAL;
63 }
64 sess->auth_iv_offset = auth_xform->auth.iv.offset;
65
66
67 IMB_SNOW3G_INIT_KEY_SCHED(mgr, auth_xform->auth.key.data,
68 &sess->pKeySched_hash);
69 }
70
71 sess->op = mode;
72
73 return 0;
74}
75
76
77static uint8_t *
78snow3g_digest_appended_in_src(struct rte_crypto_op *op)
79{
80 unsigned int auth_size, cipher_size;
81
82 auth_size = (op->sym->auth.data.offset >> 3) +
83 (op->sym->auth.data.length >> 3);
84 cipher_size = (op->sym->cipher.data.offset >> 3) +
85 (op->sym->cipher.data.length >> 3);
86
87 if (auth_size < cipher_size)
88 return rte_pktmbuf_mtod_offset(op->sym->m_src,
89 uint8_t *, auth_size);
90
91 return NULL;
92}
93
94
/*
 * Cipher (UEA2) a burst of byte-aligned ops sharing one session.
 *
 * Gathers src/dst/IV/length arrays for the whole burst and issues a
 * single IMB_SNOW3G_F8_N_BUFFER call.  For out-of-place ops in a
 * hash-then-cipher chain, the bytes trailing the ciphered region
 * (including the digest) are copied to the destination mbuf so the
 * output packet is complete.  Returns the number of ops submitted.
 */
static uint8_t
process_snow3g_cipher_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
		struct snow3g_session *session,
		uint8_t num_ops)
{
	uint32_t i;
	uint8_t processed_ops = 0;
	const void *src[SNOW3G_MAX_BURST] = {NULL};
	void *dst[SNOW3G_MAX_BURST] = {NULL};
	uint8_t *digest_appended[SNOW3G_MAX_BURST] = {NULL};
	const void *iv[SNOW3G_MAX_BURST] = {NULL};
	uint32_t num_bytes[SNOW3G_MAX_BURST] = {0};
	uint32_t cipher_off, cipher_len;
	int unencrypted_bytes = 0;

	for (i = 0; i < num_ops; i++) {

		/* Offsets/lengths arrive in bits; convert to bytes. */
		cipher_off = ops[i]->sym->cipher.data.offset >> 3;
		cipher_len = ops[i]->sym->cipher.data.length >> 3;
		src[i] = rte_pktmbuf_mtod_offset(
			ops[i]->sym->m_src, uint8_t *, cipher_off);

		/* Out-of-place operation: cipher into m_dst. */
		if (ops[i]->sym->m_dst &&
				ops[i]->sym->m_src != ops[i]->sym->m_dst) {
			dst[i] = rte_pktmbuf_mtod_offset(
				ops[i]->sym->m_dst, uint8_t *, cipher_off);

			/*
			 * For hash-then-cipher chains, compute how many
			 * bytes past the ciphered region (auth tail plus
			 * digest) must be copied through to m_dst.
			 *
			 * NOTE(review): the following "if" is outside the
			 * session->op check above (no braces), so it relies
			 * on unencrypted_bytes staying 0 for other modes —
			 * session->op is constant across the burst, so this
			 * holds; confirm if modes are ever mixed.
			 */
			if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT
				|| session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
				unencrypted_bytes =
					(ops[i]->sym->auth.data.offset >> 3) +
					(ops[i]->sym->auth.data.length >> 3) +
					(SNOW3G_DIGEST_LENGTH) -
					cipher_off - cipher_len;
			if (unencrypted_bytes > 0)
				rte_memcpy(
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_dst, uint8_t *,
						cipher_off + cipher_len),
					rte_pktmbuf_mtod_offset(
						ops[i]->sym->m_src, uint8_t *,
						cipher_off + cipher_len),
					unencrypted_bytes);
		} else
			/* In-place operation: cipher within m_src. */
			dst[i] = rte_pktmbuf_mtod_offset(ops[i]->sym->m_src,
						uint8_t *, cipher_off);

		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				session->cipher_iv_offset);
		num_bytes[i] = cipher_len;
		processed_ops++;
	}

	/* One multi-buffer F8 call for the whole burst. */
	IMB_SNOW3G_F8_N_BUFFER(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, num_bytes, processed_ops);

	/*
	 * For hash-then-cipher OOP ops whose digest was appended inside
	 * the source buffer, wipe the (now stale) source-side digest.
	 */
	for (i = 0; i < num_ops; i++) {
		if ((session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
			session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT) &&
			ops[i]->sym->m_dst != NULL) {
			digest_appended[i] =
				snow3g_digest_appended_in_src(ops[i]);

			if (digest_appended[i] != NULL)
				memset(digest_appended[i],
					0, SNOW3G_DIGEST_LENGTH);
		}
	}
	return processed_ops;
}
173
174
/*
 * Cipher (UEA2) a single op whose cipher offset/length is NOT byte
 * aligned, using the bit-level IPsec-MB entry point.
 *
 * Bit-level in-place is not supported: m_dst must be a distinct mbuf.
 * Returns 1 on success, 0 (with op status INVALID_ARGS) otherwise.
 */
static uint8_t
process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op,
		struct snow3g_session *session)
{
	uint8_t *src, *dst;
	uint8_t *iv;
	uint32_t length_in_bits, offset_in_bits;
	int unencrypted_bytes = 0;

	offset_in_bits = op->sym->cipher.data.offset;
	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
	if (op->sym->m_dst == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n");
		return 0;
	}
	length_in_bits = op->sym->cipher.data.length;
	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);

	/*
	 * For hash-then-cipher chains, bytes past the ciphered region
	 * (auth tail plus digest) must be copied through to m_dst so the
	 * output packet is complete.
	 * NOTE(review): the copy offset is (length_in_bits >> 3) only,
	 * not offset + length — presumably offset_in_bits is 0 for these
	 * chains; confirm against callers.
	 */
	if (session->op == IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
		session->op == IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
		unencrypted_bytes =
			(op->sym->auth.data.offset >> 3) +
			(op->sym->auth.data.length >> 3) +
			(SNOW3G_DIGEST_LENGTH) -
			(offset_in_bits >> 3) -
			(length_in_bits >> 3);
	if (unencrypted_bytes > 0)
		rte_memcpy(
			rte_pktmbuf_mtod_offset(
				op->sym->m_dst, uint8_t *,
				(length_in_bits >> 3)),
			rte_pktmbuf_mtod_offset(
				op->sym->m_src, uint8_t *,
				(length_in_bits >> 3)),
			unencrypted_bytes);

	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->cipher_iv_offset);

	/* Single-buffer, bit-granular F8 call. */
	IMB_SNOW3G_F8_1_BUFFER_BIT(qp->mb_mgr, &session->pKeySched_cipher, iv,
			src, dst, length_in_bits, offset_in_bits);

	return 1;
}
224
225
226static int
227process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
228 struct snow3g_session *session,
229 uint8_t num_ops)
230{
231 uint32_t i;
232 uint8_t processed_ops = 0;
233 uint8_t *src, *dst;
234 uint32_t length_in_bits;
235 uint8_t *iv;
236 uint8_t digest_appended = 0;
237 struct snow3g_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
238
239 for (i = 0; i < num_ops; i++) {
240
241 if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
242 ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
243 IPSEC_MB_LOG(ERR, "Offset");
244 break;
245 }
246
247 dst = NULL;
248
249 length_in_bits = ops[i]->sym->auth.data.length;
250
251 src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
252 (ops[i]->sym->auth.data.offset >> 3);
253 iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
254 session->auth_iv_offset);
255
256 if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
257 dst = qp_data->temp_digest;
258
259 if ((session->op ==
260 IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN ||
261 session->op ==
262 IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY) &&
263 ops[i]->sym->m_dst != NULL)
264 src = rte_pktmbuf_mtod_offset(
265 ops[i]->sym->m_dst, uint8_t *,
266 ops[i]->sym->auth.data.offset >> 3);
267
268 IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
269 &session->pKeySched_hash,
270 iv, src, length_in_bits, dst);
271
272 if (memcmp(dst, ops[i]->sym->auth.digest.data,
273 SNOW3G_DIGEST_LENGTH) != 0)
274 ops[i]->status =
275 RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
276 } else {
277 if (session->op ==
278 IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT ||
279 session->op ==
280 IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT)
281 dst = snow3g_digest_appended_in_src(ops[i]);
282
283 if (dst != NULL)
284 digest_appended = 1;
285 else
286 dst = ops[i]->sym->auth.digest.data;
287
288 IMB_SNOW3G_F9_1_BUFFER(qp->mb_mgr,
289 &session->pKeySched_hash,
290 iv, src, length_in_bits, dst);
291
292
293 if (digest_appended)
294 rte_memcpy(ops[i]->sym->auth.digest.data,
295 dst, SNOW3G_DIGEST_LENGTH);
296 }
297 processed_ops++;
298 }
299
300 return processed_ops;
301}
302
303
/*
 * Process a burst of byte-aligned ops that all share one session.
 *
 * Dispatches to the cipher/hash helpers in the order dictated by the
 * session's chained operation mode, promotes NOT_PROCESSED ops to
 * SUCCESS, and for sessionless ops wipes and returns the session
 * objects to their mempools.  Returns the number of ops completed by
 * the first stage of the chain.
 */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint8_t num_ops)
{
	uint32_t i;
	uint32_t processed_ops;

#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
	/* Debug builds reject non-contiguous mbufs up front. */
	for (i = 0; i < num_ops; i++) {
		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
				(ops[i]->sym->m_dst != NULL &&
				!rte_pktmbuf_is_contiguous(
						ops[i]->sym->m_dst))) {
			IPSEC_MB_LOG(ERR,
				"PMD supports only contiguous mbufs, "
				"op (%p) provides noncontiguous mbuf as "
				"source/destination buffer.\n", ops[i]);
			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	}
#endif

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:
		processed_ops = process_snow3g_cipher_op(qp, ops,
				session, num_ops);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		/* Cipher first, then hash only the ops that ciphered. */
		processed_ops = process_snow3g_cipher_op(qp, ops, session,
				num_ops);
		process_snow3g_hash_op(qp, ops, session, processed_ops);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		/* Hash first, then cipher only the ops that hashed. */
		processed_ops = process_snow3g_hash_op(qp, ops, session,
				num_ops);
		process_snow3g_cipher_op(qp, ops, session, processed_ops);
		break;
	default:
		/* Unsupported/unknown chain mode: nothing processed. */
		processed_ops = 0;
	}

	for (i = 0; i < num_ops; i++) {
		/*
		 * Ops still NOT_PROCESSED completed without error; any
		 * op already marked (e.g. AUTH_FAILED) keeps its status.
		 */
		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

		/*
		 * Sessionless: scrub key material, then return both the
		 * private session and the session header to their pools.
		 */
		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
			memset(session, 0, sizeof(struct snow3g_session));
			memset(ops[i]->sym->session, 0,
			rte_cryptodev_sym_get_existing_header_session_size(
					ops[i]->sym->session));
			rte_mempool_put(qp->sess_mp_priv, session);
			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
			ops[i]->sym->session = NULL;
		}
	}
	return processed_ops;
}
375
376
/*
 * Process one op with a non-byte-aligned cipher region.
 *
 * Runs the bit-level cipher helper and/or the hash helper per the
 * session's chain mode, finalises the op status, releases sessionless
 * session state, and enqueues the finished op on the qp's ingress ring
 * for the dequeue path to return.  Returns the number of ops enqueued
 * (0 or 1).
 */
static int
process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
		struct ipsec_mb_qp *qp, uint16_t *accumulated_enqueued_ops)
{
	uint32_t enqueued_op, processed_op;

	switch (session->op) {
	case IPSEC_MB_OP_ENCRYPT_ONLY:
	case IPSEC_MB_OP_DECRYPT_ONLY:

		processed_op = process_snow3g_cipher_op_bit(qp, op,
				session);
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		/* Hash stage only runs if the cipher stage succeeded. */
		processed_op = process_snow3g_cipher_op_bit(qp, op, session);
		if (processed_op == 1)
			process_snow3g_hash_op(qp, &op, session, 1);
		break;
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		/* Cipher stage only runs if the hash stage succeeded. */
		processed_op = process_snow3g_hash_op(qp, &op, session, 1);
		if (processed_op == 1)
			process_snow3g_cipher_op_bit(qp, op, session);
		break;
	default:
		/* Unsupported/unknown chain mode: nothing processed. */
		processed_op = 0;
	}

	/*
	 * Still NOT_PROCESSED means the op completed without error;
	 * errors set a specific status in the helpers above.
	 */
	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/*
	 * Sessionless cleanup.
	 * NOTE(review): this path frees via
	 * rte_cryptodev_sym_session_free() while process_ops() returns
	 * the objects to qp mempools — confirm the asymmetry is
	 * intentional.
	 */
	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		memset(op->sym->session, 0, sizeof(struct snow3g_session));
		rte_cryptodev_sym_session_free(op->sym->session);
		op->sym->session = NULL;
	}

	/* Hand the finished op back via the qp's ingress ring. */
	enqueued_op = rte_ring_enqueue_burst(qp->ingress_queue,
			(void **)&op, processed_op, NULL);
	qp->stats.enqueued_count += enqueued_op;
	*accumulated_enqueued_ops += enqueued_op;

	return enqueued_op;
}
432
/*
 * Dequeue-burst callback: pull ops off the qp's ingress ring, process
 * them, and return them to the caller in-place in @ops.
 *
 * Byte-aligned ops with the same session are batched (up to
 * SNOW3G_MAX_BURST) and run through process_ops(); any op with a
 * bit-granular cipher region flushes the pending batch and is handled
 * individually by process_op_bit().  Returns the number of ops
 * dequeued and accounted.
 */
static uint16_t
snow3g_pmd_dequeue_burst(void *queue_pair,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
	struct rte_crypto_op *curr_c_op;

	struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
	uint32_t i;
	uint8_t burst_size = 0;
	uint16_t enqueued_ops = 0;
	uint8_t processed_ops;
	uint32_t nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->ingress_queue,
			(void **)ops, nb_ops, NULL);

	for (i = 0; i < nb_dequeued; i++) {
		curr_c_op = ops[i];

		/* Helpers flip this to SUCCESS/error as they finish. */
		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		curr_sess = ipsec_mb_get_session_private(qp, curr_c_op);
		if (unlikely(curr_sess == NULL ||
				curr_sess->op == IPSEC_MB_OP_NOT_SUPPORTED)) {
			/*
			 * NOTE(review): on this break, ops[i..] were
			 * already dequeued but are not counted in the
			 * return value — confirm callers tolerate this.
			 */
			curr_c_op->status =
					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
			break;
		}

		/*
		 * Bit-granular cipher region: flush the pending batch
		 * (same-session ordering must be preserved), then handle
		 * this op individually.
		 */
		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
				|| ((curr_c_op->sym->cipher.data.offset
					% BYTE_LEN) != 0)) {

			if (prev_sess != NULL) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}

			processed_ops = process_op_bit(curr_c_op, curr_sess,
					qp, &enqueued_ops);
			if (processed_ops != 1)
				break;

			continue;
		}

		/* Batch byte-aligned ops that share a session. */
		if (prev_sess == NULL) {
			prev_sess = curr_sess;
			c_ops[burst_size++] = curr_c_op;
		} else if (curr_sess == prev_sess) {
			c_ops[burst_size++] = curr_c_op;

			/* Full batch: process it now. */
			if (burst_size == SNOW3G_MAX_BURST) {
				processed_ops = process_ops(c_ops, prev_sess,
						qp, burst_size);
				if (processed_ops < burst_size) {
					burst_size = 0;
					break;
				}

				burst_size = 0;
				prev_sess = NULL;
			}
		} else {
			/*
			 * Session changed: flush the previous batch and
			 * start a new one with the current op.
			 */
			processed_ops = process_ops(c_ops, prev_sess,
					qp, burst_size);
			if (processed_ops < burst_size) {
				burst_size = 0;
				break;
			}

			burst_size = 0;
			prev_sess = curr_sess;

			c_ops[burst_size++] = curr_c_op;
		}
	}

	/* Flush whatever is left in the final batch. */
	if (burst_size != 0) {

		processed_ops = process_ops(c_ops, prev_sess,
				qp, burst_size);
	}

	qp->stats.dequeued_count += i;
	return i;
}
541
/*
 * Cryptodev ops table for the SNOW 3G PMD.  All callbacks are the
 * shared ipsec_mb implementations; only the dequeue path (registered
 * separately in the RTE_INIT constructor below) is SNOW 3G specific.
 */
struct rte_cryptodev_ops snow3g_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};

/* Exported alias to the ops table above. */
struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
562
563static int
564snow3g_probe(struct rte_vdev_device *vdev)
565{
566 return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_SNOW3G);
567}
568
/* Virtual-device driver hooks; remove is the shared ipsec_mb teardown. */
static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
	.probe = snow3g_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver snow3g_crypto_drv;

/* Register the vdev driver, its legacy alias, accepted devargs, and the
 * crypto driver (which allocates pmd_driver_id_snow3g).
 */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
			"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
		cryptodev_snow3g_pmd_drv.driver,
		pmd_driver_id_snow3g);
583
584
/*
 * Constructor: fill in the SNOW 3G slot of the shared ipsec_mb PMD
 * table — capabilities, burst/ops entry points, feature flags, and the
 * per-qp / per-session private data sizes.
 */
RTE_INIT(ipsec_mb_register_snow3g)
{
	struct ipsec_mb_internals *snow3g_data
		= &ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_SNOW3G];

	snow3g_data->caps = snow3g_capabilities;
	snow3g_data->dequeue_burst = snow3g_pmd_dequeue_burst;
	snow3g_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
	snow3g_data->internals_priv_size = 0;
	snow3g_data->ops = &snow3g_pmd_ops;
	snow3g_data->qp_priv_size = sizeof(struct snow3g_qp_data);
	snow3g_data->session_configure = snow3g_session_configure;
	snow3g_data->session_priv_size = sizeof(struct snow3g_session);
}
604