1
2
3
4
5#include "pmd_aesni_mb_priv.h"
6
/** Cursor into an mbuf chain, used while walking SGL crypto operations. */
struct aesni_mb_op_buf_data {
	struct rte_mbuf *m;	/**< current segment of the chain */
	uint32_t offset;	/**< byte offset into the current segment */
};
11
12
13
14
15
16
17
18
19
20
21
22
23static void
24calculate_auth_precomputes(hash_one_block_t one_block_hash,
25 uint8_t *ipad, uint8_t *opad,
26 const uint8_t *hkey, uint16_t hkey_len,
27 uint16_t blocksize)
28{
29 uint32_t i, length;
30
31 uint8_t ipad_buf[blocksize] __rte_aligned(16);
32 uint8_t opad_buf[blocksize] __rte_aligned(16);
33
34
35 memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
36 memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
37
38
39 length = hkey_len > blocksize ? blocksize : hkey_len;
40
41 for (i = 0; i < length; i++) {
42 ipad_buf[i] ^= hkey[i];
43 opad_buf[i] ^= hkey[i];
44 }
45
46
47 (*one_block_hash)(ipad_buf, ipad);
48 (*one_block_hash)(opad_buf, opad);
49
50
51 memset(ipad_buf, 0, blocksize);
52 memset(opad_buf, 0, blocksize);
53}
54
55static inline int
56is_aead_algo(IMB_HASH_ALG hash_alg, IMB_CIPHER_MODE cipher_mode)
57{
58 return (hash_alg == IMB_AUTH_CHACHA20_POLY1305 ||
59 hash_alg == IMB_AUTH_AES_CCM ||
60 (hash_alg == IMB_AUTH_AES_GMAC &&
61 cipher_mode == IMB_CIPHER_GCM));
62}
63
64
65static int
66aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr,
67 struct aesni_mb_session *sess,
68 const struct rte_crypto_sym_xform *xform)
69{
70 hash_one_block_t hash_oneblock_fn = NULL;
71 unsigned int key_larger_block_size = 0;
72 uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
73 uint32_t auth_precompute = 1;
74
75 if (xform == NULL) {
76 sess->auth.algo = IMB_AUTH_NULL;
77 return 0;
78 }
79
80 if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
81 IPSEC_MB_LOG(ERR, "Crypto xform struct not of type auth");
82 return -1;
83 }
84
85
86 sess->auth_iv.offset = xform->auth.iv.offset;
87 sess->auth_iv.length = xform->auth.iv.length;
88
89
90 sess->auth.req_digest_len = xform->auth.digest_length;
91
92
93 sess->auth.operation = xform->auth.op;
94
95
96 if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL) {
97 sess->auth.algo = IMB_AUTH_NULL;
98 sess->auth.gen_digest_len = 0;
99 return 0;
100 }
101
102 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
103 sess->auth.algo = IMB_AUTH_AES_XCBC;
104
105 uint16_t xcbc_mac_digest_len =
106 get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC);
107 if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
108 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
109 return -EINVAL;
110 }
111 sess->auth.gen_digest_len = sess->auth.req_digest_len;
112
113 IMB_AES_XCBC_KEYEXP(mb_mgr, xform->auth.key.data,
114 sess->auth.xcbc.k1_expanded,
115 sess->auth.xcbc.k2, sess->auth.xcbc.k3);
116 return 0;
117 }
118
119 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
120 uint32_t dust[4*15];
121
122 sess->auth.algo = IMB_AUTH_AES_CMAC;
123
124 uint16_t cmac_digest_len =
125 get_digest_byte_length(IMB_AUTH_AES_CMAC);
126
127 if (sess->auth.req_digest_len > cmac_digest_len) {
128 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
129 return -EINVAL;
130 }
131
132
133
134
135
136
137
138
139 if (sess->auth.req_digest_len < 4)
140 sess->auth.gen_digest_len = cmac_digest_len;
141 else
142 sess->auth.gen_digest_len = sess->auth.req_digest_len;
143
144 IMB_AES_KEYEXP_128(mb_mgr, xform->auth.key.data,
145 sess->auth.cmac.expkey, dust);
146 IMB_AES_CMAC_SUBKEY_GEN_128(mb_mgr, sess->auth.cmac.expkey,
147 sess->auth.cmac.skey1, sess->auth.cmac.skey2);
148 return 0;
149 }
150
151 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
152 if (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) {
153 sess->cipher.direction = IMB_DIR_ENCRYPT;
154 sess->chain_order = IMB_ORDER_CIPHER_HASH;
155 } else
156 sess->cipher.direction = IMB_DIR_DECRYPT;
157
158 sess->auth.algo = IMB_AUTH_AES_GMAC;
159 if (sess->auth.req_digest_len >
160 get_digest_byte_length(IMB_AUTH_AES_GMAC)) {
161 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
162 return -EINVAL;
163 }
164 sess->auth.gen_digest_len = sess->auth.req_digest_len;
165 sess->iv.length = xform->auth.iv.length;
166 sess->iv.offset = xform->auth.iv.offset;
167
168 switch (xform->auth.key.length) {
169 case IMB_KEY_128_BYTES:
170 IMB_AES128_GCM_PRE(mb_mgr, xform->auth.key.data,
171 &sess->cipher.gcm_key);
172 sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
173 break;
174 case IMB_KEY_192_BYTES:
175 IMB_AES192_GCM_PRE(mb_mgr, xform->auth.key.data,
176 &sess->cipher.gcm_key);
177 sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
178 break;
179 case IMB_KEY_256_BYTES:
180 IMB_AES256_GCM_PRE(mb_mgr, xform->auth.key.data,
181 &sess->cipher.gcm_key);
182 sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
183 break;
184 default:
185 IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
186 return -EINVAL;
187 }
188
189 return 0;
190 }
191
192 if (xform->auth.algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
193 if (xform->auth.key.length == 16) {
194 sess->auth.algo = IMB_AUTH_ZUC_EIA3_BITLEN;
195
196 if (sess->auth.req_digest_len != 4) {
197 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
198 return -EINVAL;
199 }
200 } else if (xform->auth.key.length == 32) {
201 sess->auth.algo = IMB_AUTH_ZUC256_EIA3_BITLEN;
202#if IMB_VERSION(1, 2, 0) > IMB_VERSION_NUM
203 if (sess->auth.req_digest_len != 4 &&
204 sess->auth.req_digest_len != 8 &&
205 sess->auth.req_digest_len != 16) {
206#else
207 if (sess->auth.req_digest_len != 4) {
208#endif
209 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
210 return -EINVAL;
211 }
212 } else {
213 IPSEC_MB_LOG(ERR, "Invalid authentication key length\n");
214 return -EINVAL;
215 }
216
217 sess->auth.gen_digest_len = sess->auth.req_digest_len;
218
219 memcpy(sess->auth.zuc_auth_key, xform->auth.key.data,
220 xform->auth.key.length);
221 return 0;
222 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
223 sess->auth.algo = IMB_AUTH_SNOW3G_UIA2_BITLEN;
224 uint16_t snow3g_uia2_digest_len =
225 get_truncated_digest_byte_length(
226 IMB_AUTH_SNOW3G_UIA2_BITLEN);
227 if (sess->auth.req_digest_len != snow3g_uia2_digest_len) {
228 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
229 return -EINVAL;
230 }
231 sess->auth.gen_digest_len = sess->auth.req_digest_len;
232
233 IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->auth.key.data,
234 &sess->auth.pKeySched_snow3g_auth);
235 return 0;
236 } else if (xform->auth.algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
237 sess->auth.algo = IMB_AUTH_KASUMI_UIA1;
238 uint16_t kasumi_f9_digest_len =
239 get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1);
240 if (sess->auth.req_digest_len != kasumi_f9_digest_len) {
241 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
242 return -EINVAL;
243 }
244 sess->auth.gen_digest_len = sess->auth.req_digest_len;
245
246 IMB_KASUMI_INIT_F9_KEY_SCHED(mb_mgr, xform->auth.key.data,
247 &sess->auth.pKeySched_kasumi_auth);
248 return 0;
249 }
250
251 switch (xform->auth.algo) {
252 case RTE_CRYPTO_AUTH_MD5_HMAC:
253 sess->auth.algo = IMB_AUTH_MD5;
254 hash_oneblock_fn = mb_mgr->md5_one_block;
255 break;
256 case RTE_CRYPTO_AUTH_SHA1_HMAC:
257 sess->auth.algo = IMB_AUTH_HMAC_SHA_1;
258 hash_oneblock_fn = mb_mgr->sha1_one_block;
259 if (xform->auth.key.length > get_auth_algo_blocksize(
260 IMB_AUTH_HMAC_SHA_1)) {
261 IMB_SHA1(mb_mgr,
262 xform->auth.key.data,
263 xform->auth.key.length,
264 hashed_key);
265 key_larger_block_size = 1;
266 }
267 break;
268 case RTE_CRYPTO_AUTH_SHA1:
269 sess->auth.algo = IMB_AUTH_SHA_1;
270 auth_precompute = 0;
271 break;
272 case RTE_CRYPTO_AUTH_SHA224_HMAC:
273 sess->auth.algo = IMB_AUTH_HMAC_SHA_224;
274 hash_oneblock_fn = mb_mgr->sha224_one_block;
275 if (xform->auth.key.length > get_auth_algo_blocksize(
276 IMB_AUTH_HMAC_SHA_224)) {
277 IMB_SHA224(mb_mgr,
278 xform->auth.key.data,
279 xform->auth.key.length,
280 hashed_key);
281 key_larger_block_size = 1;
282 }
283 break;
284 case RTE_CRYPTO_AUTH_SHA224:
285 sess->auth.algo = IMB_AUTH_SHA_224;
286 auth_precompute = 0;
287 break;
288 case RTE_CRYPTO_AUTH_SHA256_HMAC:
289 sess->auth.algo = IMB_AUTH_HMAC_SHA_256;
290 hash_oneblock_fn = mb_mgr->sha256_one_block;
291 if (xform->auth.key.length > get_auth_algo_blocksize(
292 IMB_AUTH_HMAC_SHA_256)) {
293 IMB_SHA256(mb_mgr,
294 xform->auth.key.data,
295 xform->auth.key.length,
296 hashed_key);
297 key_larger_block_size = 1;
298 }
299 break;
300 case RTE_CRYPTO_AUTH_SHA256:
301 sess->auth.algo = IMB_AUTH_SHA_256;
302 auth_precompute = 0;
303 break;
304 case RTE_CRYPTO_AUTH_SHA384_HMAC:
305 sess->auth.algo = IMB_AUTH_HMAC_SHA_384;
306 hash_oneblock_fn = mb_mgr->sha384_one_block;
307 if (xform->auth.key.length > get_auth_algo_blocksize(
308 IMB_AUTH_HMAC_SHA_384)) {
309 IMB_SHA384(mb_mgr,
310 xform->auth.key.data,
311 xform->auth.key.length,
312 hashed_key);
313 key_larger_block_size = 1;
314 }
315 break;
316 case RTE_CRYPTO_AUTH_SHA384:
317 sess->auth.algo = IMB_AUTH_SHA_384;
318 auth_precompute = 0;
319 break;
320 case RTE_CRYPTO_AUTH_SHA512_HMAC:
321 sess->auth.algo = IMB_AUTH_HMAC_SHA_512;
322 hash_oneblock_fn = mb_mgr->sha512_one_block;
323 if (xform->auth.key.length > get_auth_algo_blocksize(
324 IMB_AUTH_HMAC_SHA_512)) {
325 IMB_SHA512(mb_mgr,
326 xform->auth.key.data,
327 xform->auth.key.length,
328 hashed_key);
329 key_larger_block_size = 1;
330 }
331 break;
332 case RTE_CRYPTO_AUTH_SHA512:
333 sess->auth.algo = IMB_AUTH_SHA_512;
334 auth_precompute = 0;
335 break;
336 default:
337 IPSEC_MB_LOG(ERR,
338 "Unsupported authentication algorithm selection");
339 return -ENOTSUP;
340 }
341 uint16_t trunc_digest_size =
342 get_truncated_digest_byte_length(sess->auth.algo);
343 uint16_t full_digest_size =
344 get_digest_byte_length(sess->auth.algo);
345
346 if (sess->auth.req_digest_len > full_digest_size ||
347 sess->auth.req_digest_len == 0) {
348 IPSEC_MB_LOG(ERR, "Invalid digest size\n");
349 return -EINVAL;
350 }
351
352 if (sess->auth.req_digest_len != trunc_digest_size &&
353 sess->auth.req_digest_len != full_digest_size)
354 sess->auth.gen_digest_len = full_digest_size;
355 else
356 sess->auth.gen_digest_len = sess->auth.req_digest_len;
357
358
359 if (auth_precompute == 0)
360 return 0;
361
362
363 if (key_larger_block_size) {
364 calculate_auth_precomputes(hash_oneblock_fn,
365 sess->auth.pads.inner, sess->auth.pads.outer,
366 hashed_key,
367 xform->auth.key.length,
368 get_auth_algo_blocksize(sess->auth.algo));
369 } else {
370 calculate_auth_precomputes(hash_oneblock_fn,
371 sess->auth.pads.inner, sess->auth.pads.outer,
372 xform->auth.key.data,
373 xform->auth.key.length,
374 get_auth_algo_blocksize(sess->auth.algo));
375 }
376
377 return 0;
378}
379
380
/*
 * Configure the cipher parameters of an AESNI-MB session.
 *
 * @param mb_mgr	intel-ipsec-mb manager, used for key expansion
 * @param sess		session being configured
 * @param xform		cipher transform; NULL selects the NULL cipher
 * @return 0 on success, negative errno value on failure
 */
static int
aesni_mb_set_session_cipher_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	uint8_t is_aes = 0;
	uint8_t is_3DES = 0;
	uint8_t is_docsis = 0;
	uint8_t is_zuc = 0;
	uint8_t is_snow3g = 0;
	uint8_t is_kasumi = 0;

	if (xform == NULL) {
		sess->cipher.mode = IMB_CIPHER_NULL;
		return 0;
	}

	if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
		IPSEC_MB_LOG(ERR, "Crypto xform struct not of type cipher");
		return -EINVAL;
	}

	/* Select cipher direction */
	switch (xform->cipher.op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		sess->cipher.direction = IMB_DIR_ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		sess->cipher.direction = IMB_DIR_DECRYPT;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid cipher operation parameter");
		return -EINVAL;
	}

	/* Select cipher mode; flags steer the key-expansion code below */
	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		sess->cipher.mode = IMB_CIPHER_CBC;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		sess->cipher.mode = IMB_CIPHER_CNTR;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		sess->cipher.mode = IMB_CIPHER_DOCSIS_SEC_BPI;
		is_docsis = 1;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		sess->cipher.mode = IMB_CIPHER_DES;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		sess->cipher.mode = IMB_CIPHER_DOCSIS_DES;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		sess->cipher.mode = IMB_CIPHER_DES3;
		is_3DES = 1;
		break;
	case RTE_CRYPTO_CIPHER_AES_ECB:
		sess->cipher.mode = IMB_CIPHER_ECB;
		is_aes = 1;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		sess->cipher.mode = IMB_CIPHER_ZUC_EEA3;
		is_zuc = 1;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		sess->cipher.mode = IMB_CIPHER_SNOW3G_UEA2_BITLEN;
		is_snow3g = 1;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		sess->cipher.mode = IMB_CIPHER_KASUMI_UEA1_BITLEN;
		is_kasumi = 1;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		sess->cipher.mode = IMB_CIPHER_NULL;
		sess->cipher.key_length_in_bytes = 0;
		sess->iv.offset = xform->cipher.iv.offset;
		sess->iv.length = xform->cipher.iv.length;
		return 0;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported cipher mode parameter");
		return -ENOTSUP;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->cipher.iv.offset;
	sess->iv.length = xform->cipher.iv.length;

	/* Expand the cipher key per algorithm family */
	if (is_aes) {
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_192_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES_KEYEXP_192(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_docsis) {
		/* DOCSIS BPI supports only 128- and 256-bit AES keys */
		switch (xform->cipher.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->cipher.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
	} else if (is_3DES) {
		uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
				sess->cipher.exp_3des_keys.key[1],
				sess->cipher.exp_3des_keys.key[2]};

		switch (xform->cipher.key.length) {
		case 24:
			/* 3-key 3DES: K1, K2, K3 all distinct */
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);
			IMB_DES_KEYSCHED(mb_mgr, keys[2],
					xform->cipher.key.data + 16);

			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
			break;
		case 16:
			/* 2-key 3DES: K3 == K1 */
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);
			IMB_DES_KEYSCHED(mb_mgr, keys[1],
					xform->cipher.key.data + 8);

			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		case 8:
			/* 1-key 3DES: degenerate single-DES operation */
			IMB_DES_KEYSCHED(mb_mgr, keys[0],
					xform->cipher.key.data);

			sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
			sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		sess->cipher.key_length_in_bytes = 24;
	} else if (is_zuc) {
		if (xform->cipher.key.length != 16 &&
				xform->cipher.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = xform->cipher.key.length;
		memcpy(sess->cipher.zuc_cipher_key, xform->cipher.key.data,
			xform->cipher.key.length);
	} else if (is_snow3g) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_SNOW3G_INIT_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_snow3g_cipher);
	} else if (is_kasumi) {
		if (xform->cipher.key.length != 16) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 16;
		IMB_KASUMI_INIT_F8_KEY_SCHED(mb_mgr, xform->cipher.key.data,
					&sess->cipher.pKeySched_kasumi_cipher);
	} else {
		/* Remaining modes are single DES (DES-CBC / DOCSIS-DES) */
		if (xform->cipher.key.length != 8) {
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 8;

		/* DES key schedule is reused for both directions; it is
		 * stored in the AES key area of the session union.
		 */
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.encode,
				xform->cipher.key.data);
		IMB_DES_KEYSCHED(mb_mgr,
			(uint64_t *)sess->cipher.expanded_aes_keys.decode,
				xform->cipher.key.data);
	}

	return 0;
}
600
/*
 * Configure the AEAD (cipher + auth) parameters of an AESNI-MB session.
 *
 * @param mb_mgr	intel-ipsec-mb manager, used for key expansion
 * @param sess		session being configured
 * @param xform		AEAD transform (CCM, GCM or CHACHA20-POLY1305)
 * @return 0 on success, negative errno value on failure
 */
static int
aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr,
		struct aesni_mb_session *sess,
		const struct rte_crypto_sym_xform *xform)
{
	switch (xform->aead.op) {
	case RTE_CRYPTO_AEAD_OP_ENCRYPT:
		sess->cipher.direction = IMB_DIR_ENCRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	case RTE_CRYPTO_AEAD_OP_DECRYPT:
		sess->cipher.direction = IMB_DIR_DECRYPT;
		sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Invalid aead operation parameter");
		return -EINVAL;
	}

	/* Set IV parameters */
	sess->iv.offset = xform->aead.iv.offset;
	sess->iv.length = xform->aead.iv.length;

	/* AEAD always generates exactly the requested digest length */
	sess->auth.req_digest_len = xform->aead.digest_length;
	sess->auth.gen_digest_len = sess->auth.req_digest_len;

	switch (xform->aead.algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		sess->cipher.mode = IMB_CIPHER_CCM;
		sess->auth.algo = IMB_AUTH_AES_CCM;

		/* CCM supports 128- and 256-bit keys only (no 192) */
		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES_KEYEXP_128(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES_KEYEXP_256(mb_mgr, xform->aead.key.data,
					sess->cipher.expanded_aes_keys.encode,
					sess->cipher.expanded_aes_keys.decode);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* CCM digest must be an even size within [min, max] */
		if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
			sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
			(sess->auth.req_digest_len & 1) == 1) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_AES_GCM:
		sess->cipher.mode = IMB_CIPHER_GCM;
		sess->auth.algo = IMB_AUTH_AES_GMAC;

		switch (xform->aead.key.length) {
		case IMB_KEY_128_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_128_BYTES;
			IMB_AES128_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_192_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_192_BYTES;
			IMB_AES192_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		case IMB_KEY_256_BYTES:
			sess->cipher.key_length_in_bytes = IMB_KEY_256_BYTES;
			IMB_AES256_GCM_PRE(mb_mgr, xform->aead.key.data,
				&sess->cipher.gcm_key);
			break;
		default:
			IPSEC_MB_LOG(ERR, "Invalid cipher key length");
			return -EINVAL;
		}

		/* GCM tag may be 1..16 bytes */
		if (sess->auth.req_digest_len == 0 ||
				sess->auth.req_digest_len > 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;

	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		sess->cipher.mode = IMB_CIPHER_CHACHA20_POLY1305;
		sess->auth.algo = IMB_AUTH_CHACHA20_POLY1305;

		if (xform->aead.key.length != 32) {
			IPSEC_MB_LOG(ERR, "Invalid key length");
			return -EINVAL;
		}
		sess->cipher.key_length_in_bytes = 32;
		/* Raw key is stored directly; no expansion needed */
		memcpy(sess->cipher.expanded_aes_keys.encode,
			xform->aead.key.data, 32);
		/* Poly1305 tag is always 16 bytes */
		if (sess->auth.req_digest_len != 16) {
			IPSEC_MB_LOG(ERR, "Invalid digest size\n");
			return -EINVAL;
		}
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported aead mode parameter");
		return -ENOTSUP;
	}

	return 0;
}
717
718
/*
 * Parse the crypto xform chain and fully configure an AESNI-MB session:
 * chain order, then auth, cipher and (if present) AEAD parameters.
 *
 * @param mb_mgr	intel-ipsec-mb manager
 * @param priv_sess	session private data (struct aesni_mb_session)
 * @param xform		head of the crypto transform chain
 * @return 0 on success, negative errno value on failure
 */
static int
aesni_mb_session_configure(IMB_MGR *mb_mgr,
		void *priv_sess,
		const struct rte_crypto_sym_xform *xform)
{
	const struct rte_crypto_sym_xform *auth_xform = NULL;
	const struct rte_crypto_sym_xform *cipher_xform = NULL;
	const struct rte_crypto_sym_xform *aead_xform = NULL;
	enum ipsec_mb_operation mode;
	struct aesni_mb_session *sess = (struct aesni_mb_session *) priv_sess;
	int ret;

	ret = ipsec_mb_parse_xform(xform, &mode, &auth_xform,
				&cipher_xform, &aead_xform);
	if (ret)
		return ret;

	/* Select the library chain order from the parsed operation mode */
	switch (mode) {
	case IPSEC_MB_OP_HASH_VERIFY_THEN_DECRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	case IPSEC_MB_OP_ENCRYPT_THEN_HASH_GEN:
	case IPSEC_MB_OP_DECRYPT_THEN_HASH_VERIFY:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		break;
	case IPSEC_MB_OP_HASH_GEN_ONLY:
	case IPSEC_MB_OP_HASH_VERIFY_ONLY:
	case IPSEC_MB_OP_HASH_GEN_THEN_ENCRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	/*
	 * Cipher-only operations still need a chain order for the library;
	 * encrypt maps to CIPHER_HASH and decrypt to HASH_CIPHER (the hash
	 * stage is a no-op with the NULL auth algorithm).
	 */
	case IPSEC_MB_OP_ENCRYPT_ONLY:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		break;
	case IPSEC_MB_OP_DECRYPT_ONLY:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		break;
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_ENCRYPT:
		sess->chain_order = IMB_ORDER_CIPHER_HASH;
		sess->aead.aad_len = xform->aead.aad_length;
		break;
	case IPSEC_MB_OP_AEAD_AUTHENTICATED_DECRYPT:
		sess->chain_order = IMB_ORDER_HASH_CIPHER;
		sess->aead.aad_len = xform->aead.aad_length;
		break;
	case IPSEC_MB_OP_NOT_SUPPORTED:
	default:
		IPSEC_MB_LOG(ERR,
			"Unsupported operation chain order parameter");
		return -ENOTSUP;
	}

	/* Default IV lengths; overwritten by the parameter setters below */
	sess->iv.length = 0;
	sess->auth_iv.length = 0;

	ret = aesni_mb_set_session_auth_parameters(mb_mgr, sess, auth_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR,
			"Invalid/unsupported authentication parameters");
		return ret;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr, sess,
			cipher_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		return ret;
	}

	if (aead_xform) {
		ret = aesni_mb_set_session_aead_parameters(mb_mgr, sess,
				aead_xform);
		if (ret != 0) {
			IPSEC_MB_LOG(ERR,
				"Invalid/unsupported aead parameters");
			return ret;
		}
	}

	return 0;
}
808
809#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
810
811static int
812check_docsis_sec_session(struct rte_security_session_conf *conf)
813{
814 struct rte_crypto_sym_xform *crypto_sym = conf->crypto_xform;
815 struct rte_security_docsis_xform *docsis = &conf->docsis;
816
817
818 if (docsis->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
819
820 if (crypto_sym != NULL &&
821 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
822 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
823 crypto_sym->cipher.algo ==
824 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
825 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
826 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
827 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
828 crypto_sym->next == NULL) {
829 return 0;
830 }
831
832 } else if (docsis->direction == RTE_SECURITY_DOCSIS_UPLINK) {
833
834 if (crypto_sym != NULL &&
835 crypto_sym->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
836 crypto_sym->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
837 crypto_sym->cipher.algo ==
838 RTE_CRYPTO_CIPHER_AES_DOCSISBPI &&
839 (crypto_sym->cipher.key.length == IMB_KEY_128_BYTES ||
840 crypto_sym->cipher.key.length == IMB_KEY_256_BYTES) &&
841 crypto_sym->cipher.iv.length == IMB_AES_BLOCK_SIZE &&
842 crypto_sym->next == NULL) {
843 return 0;
844 }
845 }
846
847 return -EINVAL;
848}
849
850
851static int
852aesni_mb_set_docsis_sec_session_auth_parameters(struct aesni_mb_session *sess,
853 struct rte_security_docsis_xform *xform)
854{
855 if (xform == NULL) {
856 IPSEC_MB_LOG(ERR, "Invalid DOCSIS xform");
857 return -EINVAL;
858 }
859
860
861 if (xform->direction == RTE_SECURITY_DOCSIS_UPLINK) {
862 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
863 sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
864 } else if (xform->direction == RTE_SECURITY_DOCSIS_DOWNLINK) {
865 sess->auth.algo = IMB_AUTH_DOCSIS_CRC32;
866 sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
867 } else {
868 IPSEC_MB_LOG(ERR, "Unsupported DOCSIS direction");
869 return -ENOTSUP;
870 }
871
872 sess->auth.req_digest_len = RTE_ETHER_CRC_LEN;
873 sess->auth.gen_digest_len = RTE_ETHER_CRC_LEN;
874
875 return 0;
876}
877
878
879
880
881
/*
 * Configure a DOCSIS security session: validate the configuration, pick
 * the chain order from the direction, then set CRC and cipher parameters.
 * A temporary MB manager is allocated for key expansion and freed on all
 * paths via the goto-cleanup exit.
 *
 * @return 0 on success, negative errno value on failure
 */
static int
aesni_mb_set_docsis_sec_session_parameters(
		__rte_unused struct rte_cryptodev *dev,
		struct rte_security_session_conf *conf,
		void *sess)
{
	IMB_MGR *mb_mgr = alloc_init_mb_mgr();
	struct rte_security_docsis_xform *docsis_xform;
	struct rte_crypto_sym_xform *cipher_xform;
	struct aesni_mb_session *ipsec_sess = sess;
	int ret = 0;

	if (!mb_mgr)
		return -ENOMEM;

	ret = check_docsis_sec_session(conf);
	if (ret) {
		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		goto error_exit;
	}

	/* Uplink decrypts then verifies CRC; downlink generates CRC then
	 * encrypts, hence the opposite chain orders.
	 */
	switch (conf->docsis.direction) {
	case RTE_SECURITY_DOCSIS_UPLINK:
		ipsec_sess->chain_order = IMB_ORDER_CIPHER_HASH;
		docsis_xform = &conf->docsis;
		cipher_xform = conf->crypto_xform;
		break;
	case RTE_SECURITY_DOCSIS_DOWNLINK:
		ipsec_sess->chain_order = IMB_ORDER_HASH_CIPHER;
		cipher_xform = conf->crypto_xform;
		docsis_xform = &conf->docsis;
		break;
	default:
		IPSEC_MB_LOG(ERR, "Unsupported DOCSIS security configuration");
		ret = -EINVAL;
		goto error_exit;
	}

	/* Default IV length; overwritten by the cipher parameter setter */
	ipsec_sess->iv.length = 0;

	ret = aesni_mb_set_docsis_sec_session_auth_parameters(ipsec_sess,
			docsis_xform);
	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported DOCSIS parameters");
		goto error_exit;
	}

	ret = aesni_mb_set_session_cipher_parameters(mb_mgr,
			ipsec_sess, cipher_xform);

	if (ret != 0) {
		IPSEC_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
		goto error_exit;
	}

error_exit:
	free_mb_mgr(mb_mgr);
	return ret;
}
942#endif
943
/*
 * Compute the hash start offset for a job.
 *
 * For in-place operations, hash-then-cipher chains, or linearized SGL
 * buffers the plain auth offset is returned. For out-of-place (OOP)
 * cipher-then-hash, the library hashes from the destination buffer, so any
 * region that is authenticated but not ciphered must first be copied from
 * source to destination; the returned value is then the offset of the hash
 * start expressed relative to the source pointer, i.e. chosen so that
 * src + offset == dst + auth_offset under modulo-2^64 arithmetic.
 */
static inline uint64_t
auth_start_offset(struct rte_crypto_op *op, struct aesni_mb_session *session,
		uint32_t oop, const uint32_t auth_offset,
		const uint32_t cipher_offset, const uint32_t auth_length,
		const uint32_t cipher_length, uint8_t lb_sgl)
{
	struct rte_mbuf *m_src, *m_dst;
	uint8_t *p_src, *p_dst;
	uintptr_t u_src, u_dst;
	uint32_t cipher_end, auth_end;

	/* Only OOP cipher-then-hash on non-linearized buffers needs work */
	if (!oop || session->chain_order != IMB_ORDER_CIPHER_HASH || lb_sgl)
		return auth_offset;

	m_src = op->sym->m_src;
	m_dst = op->sym->m_dst;

	p_src = rte_pktmbuf_mtod(m_src, uint8_t *);
	p_dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
	u_src = (uintptr_t)p_src;
	/* NOTE: u_dst includes auth_offset while u_src does not, so the
	 * final subtraction directly yields the source-relative hash start.
	 */
	u_dst = (uintptr_t)p_dst + auth_offset;

	/* Copy the authenticated-only region that precedes the cipher
	 * region into the destination, since hashing reads from dst.
	 */
	if (cipher_offset > auth_offset)
		memcpy(p_dst + auth_offset,
				p_src + auth_offset,
				cipher_offset -
				auth_offset);

	/* Likewise copy any authenticated-only tail after the cipher end */
	cipher_end = cipher_offset + cipher_length;
	auth_end = auth_offset + auth_length;
	if (cipher_end < auth_end)
		memcpy(p_dst + cipher_end, p_src + cipher_end,
				auth_end - cipher_end);

	/* Wrap-around subtraction: the library adds this offset to src, and
	 * modulo-2^64 arithmetic lands it exactly on dst + auth_offset even
	 * when dst lies below src in the address space.
	 */
	return u_src < u_dst ? (u_dst - u_src) :
			(UINT64_MAX - u_src + u_dst + 1);
}
995
/*
 * Populate an IMB_JOB for the CPU (synchronous) crypto path from a session
 * and one flat data buffer.
 *
 * @param job		job descriptor to fill
 * @param session	configured AESNI-MB session
 * @param sofs		cipher/auth head and tail offsets within buf
 * @param buf		data buffer (source; dst is buf + cipher head)
 * @param len		buffer length in bytes
 * @param iv		IV pointer pair
 * @param aad		AAD pointer pair (AEAD algorithms)
 * @param digest	where the auth tag is written/read
 * @param udata		opaque user data echoed back in the job
 */
static inline void
set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
		union rte_crypto_sym_ofs sofs, void *buf, uint32_t len,
		struct rte_crypto_va_iova_ptr *iv,
		struct rte_crypto_va_iova_ptr *aad, void *digest, void *udata)
{
	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;
	job->iv = iv->va;

	switch (job->hash_alg) {
	case IMB_AUTH_AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CCM:
		/* +18 skips to where the PMD laid out the CCM AAD within
		 * the op's aad buffer -- assumed to match the B0/flags
		 * layout built on the enqueue side; TODO confirm vs caller.
		 */
		job->u.CCM.aad = (uint8_t *)aad->va + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		/* CCM nonce starts one byte into the IV buffer */
		job->iv++;
		break;

	case IMB_AUTH_AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->u.GCM.aad = aad->va;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
		} else {
			/* Standalone GMAC: the whole buffer is AAD */
			job->u.GCM.aad = buf;
			job->u.GCM.aad_len_in_bytes = len;
			job->cipher_mode = IMB_CIPHER_GCM;
		}
		job->enc_keys = &session->cipher.gcm_key;
		job->dec_keys = &session->cipher.gcm_key;
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = aad->va;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
			session->aead.aad_len;
		/* ChaCha20 uses the same raw key for both directions */
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.encode;
		break;
	default:
		/* HMAC family: precomputed inner/outer pads */
		job->u.HMAC._hashed_auth_key_xor_ipad =
				session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
				session->auth.pads.outer;

		if (job->cipher_mode == IMB_CIPHER_DES3) {
			job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
			job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
		} else {
			job->enc_keys = session->cipher.expanded_aes_keys.encode;
			job->dec_keys = session->cipher.expanded_aes_keys.decode;
		}
	}

	/* Set digest output location and length */
	job->auth_tag_output = digest;
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV length */
	job->iv_len_in_bytes = session->iv.length;

	/* Data parameters */
	job->src = buf;
	job->dst = (uint8_t *)buf + sofs.ofs.cipher.head;
	job->cipher_start_src_offset_in_bytes = sofs.ofs.cipher.head;
	job->hash_start_src_offset_in_bytes = sofs.ofs.auth.head;
	if (job->hash_alg == IMB_AUTH_AES_GMAC &&
			session->cipher.mode != IMB_CIPHER_GCM) {
		/* Standalone GMAC processes data as AAD only */
		job->msg_len_to_hash_in_bytes = 0;
		job->msg_len_to_cipher_in_bytes = 0;
	} else {
		job->msg_len_to_hash_in_bytes = len - sofs.ofs.auth.head -
			sofs.ofs.auth.tail;
		job->msg_len_to_cipher_in_bytes = len - sofs.ofs.cipher.head -
			sofs.ofs.cipher.tail;
	}

	job->user_data = udata;
}
1107
/*
 * Submit one SGL segment of an AEAD job to the MB manager, advancing the
 * source/destination cursors and the remaining-length counter.
 *
 * @param job		job in SGL update state
 * @param mb_mgr	intel-ipsec-mb manager
 * @param total_len	in/out: bytes remaining; 0 marks the job complete
 * @param src_data	in/out: cursor into the source mbuf chain
 * @param dst_data	in/out: cursor into the destination chain, or m ==
 *			NULL for in-place operation
 * @return 0 on success, -EINVAL on a truncated mbuf chain
 */
static int
handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
		uint32_t *total_len,
		struct aesni_mb_op_buf_data *src_data,
		struct aesni_mb_op_buf_data *dst_data)
{
	uint32_t data_len, part_len;

	/* Nothing left to process: finalize the SGL job */
	if (*total_len == 0) {
		job->sgl_state = IMB_SGL_COMPLETE;
		return 0;
	}

	if (src_data->m == NULL) {
		IPSEC_MB_LOG(ERR, "Invalid source buffer");
		return -EINVAL;
	}

	job->sgl_state = IMB_SGL_UPDATE;

	/* Bytes available in the current source segment */
	data_len = src_data->m->data_len - src_data->offset;

	job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
			src_data->offset);

	if (dst_data->m != NULL) {
		/* Skip an exhausted destination segment */
		if (dst_data->m->data_len - dst_data->offset == 0) {
			dst_data->m = dst_data->m->next;
			if (dst_data->m == NULL) {
				IPSEC_MB_LOG(ERR, "Invalid destination buffer");
				return -EINVAL;
			}
			dst_data->offset = 0;
		}
		/* Process no more than fits in the dst segment */
		part_len = RTE_MIN(data_len, (dst_data->m->data_len -
				dst_data->offset));
		job->dst = rte_pktmbuf_mtod_offset(dst_data->m,
				uint8_t *, dst_data->offset);
		dst_data->offset += part_len;
	} else {
		/* In-place: write back into the source segment */
		part_len = RTE_MIN(data_len, *total_len);
		job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
				src_data->offset);
	}

	job->msg_len_to_cipher_in_bytes = part_len;
	job->msg_len_to_hash_in_bytes = part_len;

	/* NOTE(review): the returned job overwrites the local pointer and is
	 * never inspected -- presumably submission cannot fail mid-SGL here;
	 * confirm against the intel-ipsec-mb burst API contract.
	 */
	job = IMB_SUBMIT_JOB(mb_mgr);

	*total_len -= part_len;

	/* Advance the source cursor; move to the next segment if drained */
	if (part_len != data_len) {
		src_data->offset += part_len;
	} else {
		src_data->m = src_data->m->next;
		src_data->offset = 0;
	}

	return 0;
}
1169
/*
 * Compute how many contiguous bytes a linearized SGL buffer must hold to
 * cover both the cipher and the auth regions of a job.
 *
 * Bit-oriented algorithms (SNOW3G, KASUMI, ZUC) keep their lengths/offsets
 * in bits in the job, so those are converted to bytes (>> 3) here.  For
 * plain GMAC the authenticated data is carried as GCM AAD, not as a hash
 * region.
 *
 * @param auth_len  Output: length of the auth region in bytes.
 * @return The larger of the cipher and auth region extents, in bytes.
 */
static uint64_t
sgl_linear_cipher_auth_len(IMB_JOB *job, uint64_t *auth_len)
{
	uint64_t cipher_len;

	if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN ||
			job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN)
		cipher_len = (job->msg_len_to_cipher_in_bits >> 3) +
				(job->cipher_start_src_offset_in_bits >> 3);
	else
		cipher_len = job->msg_len_to_cipher_in_bytes +
				job->cipher_start_src_offset_in_bytes;

	if (job->hash_alg == IMB_AUTH_SNOW3G_UIA2_BITLEN ||
			job->hash_alg == IMB_AUTH_ZUC_EIA3_BITLEN)
		/* Hash offset is already in bytes for these algos. */
		*auth_len = (job->msg_len_to_hash_in_bits >> 3) +
				job->hash_start_src_offset_in_bytes;
	else if (job->hash_alg == IMB_AUTH_AES_GMAC)
		*auth_len = job->u.GCM.aad_len_in_bytes;
	else
		*auth_len = job->msg_len_to_hash_in_bytes +
				job->hash_start_src_offset_in_bytes;

	return RTE_MAX(*auth_len, cipher_len);
}
1195
1196static int
1197handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset,
1198 struct aesni_mb_session *session)
1199{
1200 uint64_t auth_len, total_len;
1201 uint8_t *src, *linear_buf = NULL;
1202 int lb_offset = 0;
1203 struct rte_mbuf *src_seg;
1204 uint16_t src_len;
1205
1206 total_len = sgl_linear_cipher_auth_len(job, &auth_len);
1207 linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0);
1208 if (linear_buf == NULL) {
1209 IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer\n");
1210 return -1;
1211 }
1212
1213 for (src_seg = op->sym->m_src; (src_seg != NULL) &&
1214 (total_len - lb_offset > 0);
1215 src_seg = src_seg->next) {
1216 src = rte_pktmbuf_mtod(src_seg, uint8_t *);
1217 src_len = RTE_MIN(src_seg->data_len, total_len - lb_offset);
1218 rte_memcpy(linear_buf + lb_offset, src, src_len);
1219 lb_offset += src_len;
1220 }
1221
1222 job->src = linear_buf;
1223 job->dst = linear_buf + dst_offset;
1224 job->user_data2 = linear_buf;
1225
1226 if (job->hash_alg == IMB_AUTH_AES_GMAC)
1227 job->u.GCM.aad = linear_buf;
1228
1229 if (session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
1230 job->auth_tag_output = linear_buf + lb_offset;
1231 else
1232 job->auth_tag_output = linear_buf + auth_len;
1233
1234 return 0;
1235}
1236
1237static inline int
1238imb_lib_support_sgl_algo(IMB_CIPHER_MODE alg)
1239{
1240 if (alg == IMB_CIPHER_CHACHA20_POLY1305
1241 || alg == IMB_CIPHER_GCM)
1242 return 1;
1243 return 0;
1244}
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
/*
 * Populate an IMB_JOB from a crypto op and its session.
 *
 * Fills chain order, keys, IVs, digest destination and cipher/auth
 * offsets/lengths for every supported algorithm.  Multi-segment (SGL)
 * payloads are either linearized into a scratch buffer (algorithms the MB
 * library cannot handle as SGL) or driven through the library's
 * INIT/UPDATE/COMPLETE SGL submission loop (GCM, CHACHA20-POLY1305).
 *
 * @param digest_idx  Ring index into the queue pair's temp digest slots;
 *                    advanced whenever a temporary digest is used.
 * @return 0 on success, negative on error (op status updated).
 */
static inline int
set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx,
		IMB_MGR *mb_mgr)
{
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct aesni_mb_op_buf_data src_sgl = {0};
	struct aesni_mb_op_buf_data dst_sgl = {0};
	struct aesni_mb_session *session;
	uint32_t m_offset, oop;
	uint32_t auth_off_in_bytes;
	uint32_t ciph_off_in_bytes;
	uint32_t auth_len_in_bytes;
	uint32_t ciph_len_in_bytes;
	uint32_t total_len;
	IMB_JOB base_job;
	uint8_t sgl = 0;
	uint8_t lb_sgl = 0;
	int ret;

	session = ipsec_mb_get_session_private(qp, op);
	if (session == NULL) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	const int aead = is_aead_algo(job->hash_alg, job->cipher_mode);

	if (job->cipher_mode == IMB_CIPHER_DES3) {
		job->enc_keys = session->cipher.exp_3des_keys.ks_ptr;
		job->dec_keys = session->cipher.exp_3des_keys.ks_ptr;
	} else {
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
	}

	if (!op->sym->m_dst) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else if (op->sym->m_dst == op->sym->m_src) {
		/* in-place operation */
		m_dst = m_src;
		oop = 0;
	} else {
		/* out-of-place operation */
		m_dst = op->sym->m_dst;
		oop = 1;
	}

	/* Multi-segment buffers: native SGL when the library supports the
	 * algorithm, otherwise fall back to a linearized buffer (lb_sgl).
	 */
	if (m_src->nb_segs > 1 || m_dst->nb_segs > 1) {
		sgl = 1;
		if (!imb_lib_support_sgl_algo(session->cipher.mode))
			lb_sgl = 1;
	}

	/* Per-algorithm auth key material / AAD wiring. */
	switch (job->hash_alg) {
	case IMB_AUTH_AES_XCBC:
		job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
		job->u.XCBC._k2 = session->auth.xcbc.k2;
		job->u.XCBC._k3 = session->auth.xcbc.k3;

		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CCM:
		/* CCM AAD is stored 18 bytes into the rte aad area. */
		job->u.CCM.aad = op->sym->aead.aad.data + 18;
		job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_CMAC:
		job->u.CMAC._key_expanded = session->auth.cmac.expkey;
		job->u.CMAC._skey1 = session->auth.cmac.skey1;
		job->u.CMAC._skey2 = session->auth.cmac.skey2;
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.decode;
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->u.GCM.aad = op->sym->aead.aad.data;
			job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
			if (sgl) {
				/* Switch to the library's SGL GCM mode. */
				job->u.GCM.ctx = &qp_data->gcm_sgl_ctx;
				job->cipher_mode = IMB_CIPHER_GCM_SGL;
				job->hash_alg = IMB_AUTH_GCM_SGL;
			}
		} else {
			/* Plain GMAC: auth region is passed as GCM AAD. */
			job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
					uint8_t *, op->sym->auth.data.offset);
			job->u.GCM.aad_len_in_bytes = op->sym->auth.data.length;
			job->cipher_mode = IMB_CIPHER_GCM;
		}
		job->enc_keys = &session->cipher.gcm_key;
		job->dec_keys = &session->cipher.gcm_key;
		break;
	case IMB_AUTH_ZUC_EIA3_BITLEN:
	case IMB_AUTH_ZUC256_EIA3_BITLEN:
		job->u.ZUC_EIA3._key = session->auth.zuc_auth_key;
		job->u.ZUC_EIA3._iv = rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		job->u.SNOW3G_UIA2._key = (void *)
			&session->auth.pKeySched_snow3g_auth;
		job->u.SNOW3G_UIA2._iv =
			rte_crypto_op_ctod_offset(op, uint8_t *,
						session->auth_iv.offset);
		break;
	case IMB_AUTH_KASUMI_UIA1:
		job->u.KASUMI_UIA1._key = (void *)
			&session->auth.pKeySched_kasumi_auth;
		break;
	case IMB_AUTH_CHACHA20_POLY1305:
		job->u.CHACHA20_POLY1305.aad = op->sym->aead.aad.data;
		job->u.CHACHA20_POLY1305.aad_len_in_bytes =
			session->aead.aad_len;
		if (sgl) {
			/* Switch to the library's SGL ChaChaPoly mode. */
			job->u.CHACHA20_POLY1305.ctx = &qp_data->chacha_sgl_ctx;
			job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL;
			job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL;
		}
		job->enc_keys = session->cipher.expanded_aes_keys.encode;
		job->dec_keys = session->cipher.expanded_aes_keys.encode;
		break;
	default:
		/* HMAC family: use the precomputed inner/outer pads. */
		job->u.HMAC._hashed_auth_key_xor_ipad =
			session->auth.pads.inner;
		job->u.HMAC._hashed_auth_key_xor_opad =
			session->auth.pads.outer;

	}

	if (aead)
		m_offset = op->sym->aead.data.offset;
	else
		m_offset = op->sym->cipher.data.offset;

	/* Wireless ciphers carry their own key schedules; SNOW3G/KASUMI
	 * data pointers are computed from offset 0.
	 */
	if (job->cipher_mode == IMB_CIPHER_ZUC_EEA3) {
		job->enc_keys = session->cipher.zuc_cipher_key;
		job->dec_keys = session->cipher.zuc_cipher_key;
		m_offset >>= 3;
	} else if (job->cipher_mode == IMB_CIPHER_SNOW3G_UEA2_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_snow3g_cipher;
		m_offset = 0;
	} else if (job->cipher_mode == IMB_CIPHER_KASUMI_UEA1_BITLEN) {
		job->enc_keys = &session->cipher.pKeySched_kasumi_cipher;
		m_offset = 0;
	}

	/* Digest destination: verify always goes to a temp slot so the
	 * computed tag can be compared against the op's digest afterwards;
	 * generate goes to the op unless the tag must be truncated first.
	 */
	if (job->hash_alg != IMB_AUTH_NULL &&
			session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		job->auth_tag_output = qp_data->temp_digests[*digest_idx];
		*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
	} else {
		if (aead)
			job->auth_tag_output = op->sym->aead.digest.data;
		else
			job->auth_tag_output = op->sym->auth.digest.data;

		if (session->auth.req_digest_len !=
				session->auth.gen_digest_len) {
			job->auth_tag_output =
				qp_data->temp_digests[*digest_idx];
			*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;
		}
	}

	/* The library always generates gen_digest_len bytes; truncation to
	 * req_digest_len (if any) happens in post-processing.
	 */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;

	/* Data pointers: SGL jobs set src/dst per segment later. */
	if (sgl) {
		job->src = NULL;
		job->dst = NULL;
	} else {
		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
	}

	/* Auth offset/length and cipher IV, per algorithm. */
	switch (job->hash_alg) {
	case IMB_AUTH_AES_CCM:
		job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes = op->sym->aead.data.length;

		/* CCM IV skips the first (flags) byte. */
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset + 1);
		break;

	case IMB_AUTH_AES_GMAC:
		if (session->cipher.mode == IMB_CIPHER_GCM) {
			job->hash_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		} else {
			/* Plain GMAC: data already passed as AAD above. */
			job->msg_len_to_hash_in_bytes = 0;
			job->hash_start_src_offset_in_bytes = 0;
		}

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	case IMB_AUTH_GCM_SGL:
	case IMB_AUTH_CHACHA20_POLY1305_SGL:
		/* Lengths are supplied per segment in the SGL loop. */
		job->hash_start_src_offset_in_bytes = 0;
		job->msg_len_to_hash_in_bytes = 0;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	case IMB_AUTH_CHACHA20_POLY1305:
		job->hash_start_src_offset_in_bytes =
			op->sym->aead.data.offset;
		job->msg_len_to_hash_in_bytes =
					op->sym->aead.data.length;
		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				session->iv.offset);
		break;

	/* ZUC and SNOW3G require length in bits and offset in bytes */
	case IMB_AUTH_ZUC_EIA3_BITLEN:
	case IMB_AUTH_ZUC256_EIA3_BITLEN:
	case IMB_AUTH_SNOW3G_UIA2_BITLEN:
		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
		auth_len_in_bytes = op->sym->auth.data.length >> 3;
		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, auth_off_in_bytes,
				ciph_off_in_bytes, auth_len_in_bytes,
				ciph_len_in_bytes, lb_sgl);
		job->msg_len_to_hash_in_bits = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	/* KASUMI requires lengths and offset in bytes */
	case IMB_AUTH_KASUMI_UIA1:
		auth_off_in_bytes = op->sym->auth.data.offset >> 3;
		ciph_off_in_bytes = op->sym->cipher.data.offset >> 3;
		auth_len_in_bytes = op->sym->auth.data.length >> 3;
		ciph_len_in_bytes = op->sym->cipher.data.length >> 3;

		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, auth_off_in_bytes,
				ciph_off_in_bytes, auth_len_in_bytes,
				ciph_len_in_bytes, lb_sgl);
		job->msg_len_to_hash_in_bytes = auth_len_in_bytes;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
		break;

	default:
		job->hash_start_src_offset_in_bytes = auth_start_offset(op,
				session, oop, op->sym->auth.data.offset,
				op->sym->cipher.data.offset,
				op->sym->auth.data.length,
				op->sym->cipher.data.length, lb_sgl);
		job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			session->iv.offset);
	}

	/* Cipher offset/length, per mode. */
	switch (job->cipher_mode) {
	/* ZUC requires length and offset in bytes */
	case IMB_CIPHER_ZUC_EEA3:
		job->cipher_start_src_offset_in_bytes =
					op->sym->cipher.data.offset >> 3;
		job->msg_len_to_cipher_in_bytes =
					op->sym->cipher.data.length >> 3;
		break;
	/* ZUC and SNOW3G require length and offset in bits */
	case IMB_CIPHER_SNOW3G_UEA2_BITLEN:
	case IMB_CIPHER_KASUMI_UEA1_BITLEN:
		job->cipher_start_src_offset_in_bits =
					op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bits =
					op->sym->cipher.data.length;
		break;
	case IMB_CIPHER_GCM:
		if (session->cipher.mode == IMB_CIPHER_NULL) {
			/* Digest-only (NULL cipher) GMAC case. */
			job->msg_len_to_cipher_in_bytes = 0;
			job->cipher_start_src_offset_in_bytes = 0;
		} else {
			job->cipher_start_src_offset_in_bytes =
					op->sym->aead.data.offset;
			job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		}
		break;
	case IMB_CIPHER_CCM:
	case IMB_CIPHER_CHACHA20_POLY1305:
		job->cipher_start_src_offset_in_bytes =
				op->sym->aead.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
		break;
	case IMB_CIPHER_GCM_SGL:
	case IMB_CIPHER_CHACHA20_POLY1305_SGL:
		/* Lengths are supplied per segment in the SGL loop. */
		job->msg_len_to_cipher_in_bytes = 0;
		job->cipher_start_src_offset_in_bytes = 0;
		break;
	default:
		job->cipher_start_src_offset_in_bytes =
					op->sym->cipher.data.offset;
		job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
	}

	/* NULL cipher out-of-place still has to move the payload. */
	if (job->cipher_mode == IMB_CIPHER_NULL && oop) {
		memcpy(job->dst + job->cipher_start_src_offset_in_bytes,
			job->src + job->cipher_start_src_offset_in_bytes,
			job->msg_len_to_cipher_in_bytes);
	}

	/* Set user data to be crypto operation data struct */
	job->user_data = op;

	if (sgl) {
		/* Unsupported-algo SGL: linearize and submit as one job. */
		if (lb_sgl)
			return handle_sgl_linear(job, op, m_offset, session);

		/* Native SGL: INIT job first, then one UPDATE job per
		 * segment until the payload is consumed.
		 */
		base_job = *job;
		job->sgl_state = IMB_SGL_INIT;
		job = IMB_SUBMIT_JOB(mb_mgr);
		total_len = op->sym->aead.data.length;

		src_sgl.m = m_src;
		src_sgl.offset = m_offset;

		/* Skip segments fully covered by the data offset. */
		while (src_sgl.offset >= src_sgl.m->data_len) {
			src_sgl.offset -= src_sgl.m->data_len;
			src_sgl.m = src_sgl.m->next;

			RTE_ASSERT(src_sgl.m != NULL);
		}

		if (oop) {
			dst_sgl.m = m_dst;
			dst_sgl.offset = m_offset;

			while (dst_sgl.offset >= dst_sgl.m->data_len) {
				dst_sgl.offset -= dst_sgl.m->data_len;
				dst_sgl.m = dst_sgl.m->next;

				RTE_ASSERT(dst_sgl.m != NULL);
			}
		}

		while (job->sgl_state != IMB_SGL_COMPLETE) {
			job = IMB_GET_NEXT_JOB(mb_mgr);
			*job = base_job;
			ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
				&src_sgl, &dst_sgl);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
1650
1651#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1652
1653
1654
1655
1656
/*
 * Populate an IMB_JOB for a DOCSIS security-session op (AES-DOCSIS-BPI
 * cipher chained with DOCSIS CRC32).
 *
 * Only in-place, single-purpose DOCSIS sessions are accepted; the CRC
 * is always computed into a temporary digest slot so it can be verified
 * or inserted in post-processing.
 *
 * @return 0 on success, negative on error (op status updated).
 */
static inline int
set_sec_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
		struct rte_crypto_op *op, uint8_t *digest_idx)
{
	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
	struct rte_mbuf *m_src, *m_dst;
	struct rte_crypto_sym_op *sym;
	struct aesni_mb_session *session = NULL;

	if (unlikely(op->sess_type != RTE_CRYPTO_OP_SECURITY_SESSION)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}
	session = SECURITY_GET_SESS_PRIV(op->sym->session);

	if (unlikely(session == NULL)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -1;
	}

	/* Only DOCSIS BPI cipher + CRC32 sessions reach this path. */
	if (session->cipher.mode != IMB_CIPHER_DOCSIS_SEC_BPI ||
			session->auth.algo != IMB_AUTH_DOCSIS_CRC32) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -1;
	}

	sym = op->sym;
	m_src = sym->m_src;

	if (likely(sym->m_dst == NULL || sym->m_dst == m_src)) {
		/* in-place operation */
		m_dst = m_src;
	} else {
		/* out-of-place operation not supported */
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}

	/* Set crypto operation */
	job->chain_order = session->chain_order;

	/* Set cipher parameters */
	job->cipher_direction = session->cipher.direction;
	job->cipher_mode = session->cipher.mode;

	job->key_len_in_bytes = session->cipher.key_length_in_bytes;
	job->enc_keys = session->cipher.expanded_aes_keys.encode;
	job->dec_keys = session->cipher.expanded_aes_keys.decode;

	/* Set IV parameters */
	job->iv_len_in_bytes = session->iv.length;
	job->iv = (uint8_t *)op + session->iv.offset;

	/* Set authentication parameters */
	job->hash_alg = session->auth.algo;

	/* CRC always lands in a temp slot for verify/insert later. */
	job->auth_tag_output = qp_data->temp_digests[*digest_idx];
	*digest_idx = (*digest_idx + 1) % IMB_MAX_JOBS;

	/* Set digest length */
	job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;

	/* Set data parameters */
	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *,
						sym->cipher.data.offset);

	job->cipher_start_src_offset_in_bytes = sym->cipher.data.offset;
	job->msg_len_to_cipher_in_bytes = sym->cipher.data.length;

	job->hash_start_src_offset_in_bytes = sym->auth.data.offset;
	job->msg_len_to_hash_in_bytes = sym->auth.data.length;

	job->user_data = op;

	return 0;
}
1735
/*
 * Compare the CRC computed for a DOCSIS job against the CRC embedded in
 * the frame, and fail the op on mismatch.
 *
 * The in-frame CRC follows the CRC-covered region; job->dst points at the
 * cipher start, hence the cipher offset is subtracted from the hash
 * offset+length to locate it.
 */
static inline void
verify_docsis_sec_crc(IMB_JOB *job, uint8_t *status)
{
	uint16_t crc_offset;
	uint8_t *crc;

	/* No CRC region: nothing to verify. */
	if (!job->msg_len_to_hash_in_bytes)
		return;

	crc_offset = job->hash_start_src_offset_in_bytes +
			job->msg_len_to_hash_in_bytes -
			job->cipher_start_src_offset_in_bytes;
	crc = job->dst + crc_offset;

	/* Compare computed CRC with the one received in the frame. */
	if (memcmp(job->auth_tag_output, crc, RTE_ETHER_CRC_LEN) != 0)
		*status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
1754#endif
1755
1756static inline void
1757verify_digest(IMB_JOB *job, void *digest, uint16_t len, uint8_t *status)
1758{
1759
1760 if (memcmp(job->auth_tag_output, digest, len) != 0)
1761 *status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1762}
1763
1764static inline void
1765generate_digest(IMB_JOB *job, struct rte_crypto_op *op,
1766 struct aesni_mb_session *sess)
1767{
1768
1769 if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
1770 return;
1771
1772
1773
1774
1775
1776 memcpy(op->sym->auth.digest.data, job->auth_tag_output,
1777 sess->auth.req_digest_len);
1778}
1779
1780static void
1781post_process_sgl_linear(struct rte_crypto_op *op, IMB_JOB *job,
1782 struct aesni_mb_session *sess, uint8_t *linear_buf)
1783{
1784
1785 int lb_offset = 0;
1786 struct rte_mbuf *m_dst = op->sym->m_dst == NULL ?
1787 op->sym->m_src : op->sym->m_dst;
1788 uint16_t total_len, dst_len;
1789 uint64_t auth_len;
1790 uint8_t *dst;
1791
1792 total_len = sgl_linear_cipher_auth_len(job, &auth_len);
1793
1794 if (sess->auth.operation != RTE_CRYPTO_AUTH_OP_VERIFY)
1795 total_len += job->auth_tag_output_len_in_bytes;
1796
1797 for (; (m_dst != NULL) && (total_len - lb_offset > 0); m_dst = m_dst->next) {
1798 dst = rte_pktmbuf_mtod(m_dst, uint8_t *);
1799 dst_len = RTE_MIN(m_dst->data_len, total_len - lb_offset);
1800 rte_memcpy(dst, linear_buf + lb_offset, dst_len);
1801 lb_offset += dst_len;
1802 }
1803}
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815static inline struct rte_crypto_op *
1816post_process_mb_job(struct ipsec_mb_qp *qp, IMB_JOB *job)
1817{
1818 struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
1819 struct aesni_mb_session *sess = NULL;
1820 uint8_t *linear_buf = NULL;
1821
1822#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1823 uint8_t is_docsis_sec = 0;
1824
1825 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1826
1827
1828
1829
1830 is_docsis_sec = 1;
1831 sess = SECURITY_GET_SESS_PRIV(op->sym->session);
1832 } else
1833#endif
1834 sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1835
1836 if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
1837 switch (job->status) {
1838 case IMB_STATUS_COMPLETED:
1839 op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1840
1841 if ((op->sym->m_src->nb_segs > 1 ||
1842 (op->sym->m_dst != NULL &&
1843 op->sym->m_dst->nb_segs > 1)) &&
1844 !imb_lib_support_sgl_algo(sess->cipher.mode)) {
1845 linear_buf = (uint8_t *) job->user_data2;
1846 post_process_sgl_linear(op, job, sess, linear_buf);
1847 }
1848
1849 if (job->hash_alg == IMB_AUTH_NULL)
1850 break;
1851
1852 if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1853 if (is_aead_algo(job->hash_alg,
1854 sess->cipher.mode))
1855 verify_digest(job,
1856 op->sym->aead.digest.data,
1857 sess->auth.req_digest_len,
1858 &op->status);
1859#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
1860 else if (is_docsis_sec)
1861 verify_docsis_sec_crc(job,
1862 &op->status);
1863#endif
1864 else
1865 verify_digest(job,
1866 op->sym->auth.digest.data,
1867 sess->auth.req_digest_len,
1868 &op->status);
1869 } else
1870 generate_digest(job, op, sess);
1871 break;
1872 default:
1873 op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1874 }
1875 rte_free(linear_buf);
1876 }
1877
1878
1879 if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1880 memset(sess, 0, sizeof(struct aesni_mb_session));
1881 rte_mempool_put(qp->sess_mp, op->sym->session);
1882 op->sym->session = NULL;
1883 }
1884
1885 return op;
1886}
1887
1888static inline void
1889post_process_mb_sync_job(IMB_JOB *job)
1890{
1891 uint32_t *st;
1892
1893 st = job->user_data;
1894 st[0] = (job->status == IMB_STATUS_COMPLETED) ? 0 : EBADMSG;
1895}
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910static unsigned
1911handle_completed_jobs(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1912 IMB_JOB *job, struct rte_crypto_op **ops,
1913 uint16_t nb_ops)
1914{
1915 struct rte_crypto_op *op = NULL;
1916 uint16_t processed_jobs = 0;
1917
1918 while (job != NULL) {
1919 op = post_process_mb_job(qp, job);
1920
1921 if (op) {
1922 ops[processed_jobs++] = op;
1923 qp->stats.dequeued_count++;
1924 } else {
1925 qp->stats.dequeue_err_count++;
1926 break;
1927 }
1928 if (processed_jobs == nb_ops)
1929 break;
1930
1931 job = IMB_GET_COMPLETED_JOB(mb_mgr);
1932 }
1933
1934 return processed_jobs;
1935}
1936
1937static inline uint32_t
1938handle_completed_sync_jobs(IMB_JOB *job, IMB_MGR *mb_mgr)
1939{
1940 uint32_t i;
1941
1942 for (i = 0; job != NULL; i++, job = IMB_GET_COMPLETED_JOB(mb_mgr))
1943 post_process_mb_sync_job(job);
1944
1945 return i;
1946}
1947
1948static inline uint32_t
1949flush_mb_sync_mgr(IMB_MGR *mb_mgr)
1950{
1951 IMB_JOB *job;
1952
1953 job = IMB_FLUSH_JOB(mb_mgr);
1954 return handle_completed_sync_jobs(job, mb_mgr);
1955}
1956
1957static inline uint16_t
1958flush_mb_mgr(struct ipsec_mb_qp *qp, IMB_MGR *mb_mgr,
1959 struct rte_crypto_op **ops, uint16_t nb_ops)
1960{
1961 int processed_ops = 0;
1962
1963
1964 IMB_JOB *job = IMB_FLUSH_JOB(mb_mgr);
1965
1966 if (job)
1967 processed_ops += handle_completed_jobs(qp, mb_mgr, job,
1968 &ops[processed_ops], nb_ops - processed_ops);
1969
1970 return processed_ops;
1971}
1972
1973static inline IMB_JOB *
1974set_job_null_op(IMB_JOB *job, struct rte_crypto_op *op)
1975{
1976 job->chain_order = IMB_ORDER_HASH_CIPHER;
1977 job->cipher_mode = IMB_CIPHER_NULL;
1978 job->hash_alg = IMB_AUTH_NULL;
1979 job->cipher_direction = IMB_DIR_DECRYPT;
1980
1981
1982 job->user_data = op;
1983
1984 return job;
1985}
1986
/*
 * Dequeue burst: drain ops from the queue pair's ingress ring, build and
 * submit a multi-buffer job for each, and collect completed jobs into the
 * ops array. Flushes the manager when it has no free job slots, and once
 * more at the end if nothing completed naturally.
 *
 * @return Number of completed ops written to the ops array.
 */
static uint16_t
aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct ipsec_mb_qp *qp = queue_pair;
	IMB_MGR *mb_mgr = qp->mb_mgr;
	struct rte_crypto_op *op;
	IMB_JOB *job;
	int retval, processed_jobs = 0;

	if (unlikely(nb_ops == 0 || mb_mgr == NULL))
		return 0;

	/* Local copy of the temp-digest ring index; written back at end. */
	uint8_t digest_idx = qp->digest_idx;

	do {
		/* Get next free mb job struct from mb manager */
		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (unlikely(job == NULL)) {
			/* if no free mb job structs we need to flush mb_mgr */
			processed_jobs += flush_mb_mgr(qp, mb_mgr,
					&ops[processed_jobs],
					nb_ops - processed_jobs);

			if (nb_ops == processed_jobs)
				break;

			/* A slot is guaranteed free after the flush. */
			job = IMB_GET_NEXT_JOB(mb_mgr);
		}

		/* Get next operation to process from ingress queue.
		 * There is no need to return the job to the IMB_MGR
		 * if there are no more operations to process, since
		 * the IMB_MGR can use that pointer again in next
		 * get_next calls.
		 */
		retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
		if (retval < 0)
			break;

#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
			retval = set_sec_mb_job_params(job, qp, op,
						&digest_idx);
		else
#endif
			retval = set_mb_job_params(job, qp, op,
					&digest_idx, mb_mgr);

		if (unlikely(retval != 0)) {
			/* Submit a harmless NULL job so the op still
			 * surfaces with its error status at dequeue.
			 */
			qp->stats.dequeue_err_count++;
			set_job_null_op(job, op);
		}

		/* Submit job to multi-buffer for processing */
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
		job = IMB_SUBMIT_JOB(mb_mgr);
#else
		job = IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif

		/* If submit returned a completed job, post-process it and
		 * any others the manager has finished.
		 */
		if (job)
			processed_jobs += handle_completed_jobs(qp, mb_mgr,
					job, &ops[processed_jobs],
					nb_ops - processed_jobs);

	} while (processed_jobs < nb_ops);

	qp->digest_idx = digest_idx;

	/* Nothing completed in-line: force a flush so callers make progress. */
	if (processed_jobs < 1)
		processed_jobs += flush_mb_mgr(qp, mb_mgr,
				&ops[processed_jobs],
				nb_ops - processed_jobs);

	return processed_jobs;
}
2067
2068static inline int
2069check_crypto_sgl(union rte_crypto_sym_ofs so, const struct rte_crypto_sgl *sgl)
2070{
2071
2072 if (sgl->num != 1)
2073 return -ENOTSUP;
2074 else if (so.ofs.cipher.head + so.ofs.cipher.tail > sgl->vec[0].len)
2075 return -EINVAL;
2076 return 0;
2077}
2078
/* Submit the prepared job to the manager; the NOCHECK variant skips the
 * library's parameter validation in non-debug builds for speed.
 */
static inline IMB_JOB *
submit_sync_job(IMB_MGR *mb_mgr)
{
#ifdef RTE_LIBRTE_PMD_AESNI_MB_DEBUG
	return IMB_SUBMIT_JOB(mb_mgr);
#else
	return IMB_SUBMIT_JOB_NOCHECK(mb_mgr);
#endif
}
2088
2089static inline uint32_t
2090generate_sync_dgst(struct rte_crypto_sym_vec *vec,
2091 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2092{
2093 uint32_t i, k;
2094
2095 for (i = 0, k = 0; i != vec->num; i++) {
2096 if (vec->status[i] == 0) {
2097 memcpy(vec->digest[i].va, dgst[i], len);
2098 k++;
2099 }
2100 }
2101
2102 return k;
2103}
2104
2105static inline uint32_t
2106verify_sync_dgst(struct rte_crypto_sym_vec *vec,
2107 const uint8_t dgst[][DIGEST_LENGTH_MAX], uint32_t len)
2108{
2109 uint32_t i, k;
2110
2111 for (i = 0, k = 0; i != vec->num; i++) {
2112 if (vec->status[i] == 0) {
2113 if (memcmp(vec->digest[i].va, dgst[i], len) != 0)
2114 vec->status[i] = EBADMSG;
2115 else
2116 k++;
2117 }
2118 }
2119
2120 return k;
2121}
2122
/*
 * Synchronous CPU-crypto bulk processing: run every element of the vector
 * through the per-thread multi-buffer manager and generate or verify the
 * digests in place.
 *
 * Each element must be a single contiguous segment (check_crypto_sgl).
 * Digests are computed into a stack-local temp array first and copied or
 * compared once all jobs have drained.
 *
 * @return Number of elements processed successfully.
 */
static uint32_t
aesni_mb_process_bulk(struct rte_cryptodev *dev __rte_unused,
	struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs sofs,
	struct rte_crypto_sym_vec *vec)
{
	int32_t ret;
	uint32_t i, j, k, len;
	void *buf;
	IMB_JOB *job;
	IMB_MGR *mb_mgr;
	struct aesni_mb_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
	uint8_t tmp_dgst[vec->num][DIGEST_LENGTH_MAX];

	/* get per-thread MB MGR, create one if needed */
	mb_mgr = get_per_thread_mb_mgr();
	if (unlikely(mb_mgr == NULL))
		return 0;

	/* i: elements examined, j: jobs submitted, k: jobs completed. */
	for (i = 0, j = 0, k = 0; i != vec->num; i++) {
		ret = check_crypto_sgl(sofs, vec->src_sgl + i);
		if (ret != 0) {
			vec->status[i] = ret;
			continue;
		}

		buf = vec->src_sgl[i].vec[0].base;
		len = vec->src_sgl[i].vec[0].len;

		job = IMB_GET_NEXT_JOB(mb_mgr);
		if (job == NULL) {
			/* Manager full: flush to free a job slot. */
			k += flush_mb_sync_mgr(mb_mgr);
			job = IMB_GET_NEXT_JOB(mb_mgr);
			RTE_ASSERT(job != NULL);
		}

		/* Submit job for processing */
		set_cpu_mb_job_params(job, s, sofs, buf, len, &vec->iv[i],
			&vec->aad[i], tmp_dgst[i], &vec->status[i]);
		job = submit_sync_job(mb_mgr);
		j++;

		/* handle completed jobs */
		k += handle_completed_sync_jobs(job, mb_mgr);
	}

	/* flush remaining jobs */
	while (k != j)
		k += flush_mb_sync_mgr(mb_mgr);

	/* finish processing for successful jobs: check/update digest */
	if (k != 0) {
		if (s->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
			k = verify_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
		else
			k = generate_sync_dgst(vec,
				(const uint8_t (*)[DIGEST_LENGTH_MAX])tmp_dgst,
				s->auth.req_digest_len);
	}

	return k;
}
2186
/* Cryptodev operations table: all generic entry points are provided by
 * the shared ipsec_mb framework; only the CPU-crypto bulk path is
 * implemented locally.
 */
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
	.dev_configure = ipsec_mb_config,
	.dev_start = ipsec_mb_start,
	.dev_stop = ipsec_mb_stop,
	.dev_close = ipsec_mb_close,

	.stats_get = ipsec_mb_stats_get,
	.stats_reset = ipsec_mb_stats_reset,

	.dev_infos_get = ipsec_mb_info_get,

	.queue_pair_setup = ipsec_mb_qp_setup,
	.queue_pair_release = ipsec_mb_qp_release,

	.sym_cpu_process = aesni_mb_process_bulk,

	.sym_session_get_size = ipsec_mb_sym_session_get_size,
	.sym_session_configure = ipsec_mb_sym_session_configure,
	.sym_session_clear = ipsec_mb_sym_session_clear
};
2207
2208#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
2209
2210
2211
2212
2213static int
2214aesni_mb_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
2215 struct rte_security_session *sess)
2216{
2217 void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
2218 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2219 int ret;
2220
2221 if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
2222 conf->protocol != RTE_SECURITY_PROTOCOL_DOCSIS) {
2223 IPSEC_MB_LOG(ERR, "Invalid security protocol");
2224 return -EINVAL;
2225 }
2226
2227 ret = aesni_mb_set_docsis_sec_session_parameters(cdev, conf,
2228 sess_private_data);
2229
2230 if (ret != 0) {
2231 IPSEC_MB_LOG(ERR, "Failed to configure session parameters");
2232 return ret;
2233 }
2234
2235 return ret;
2236}
2237
2238
2239static int
2240aesni_mb_pmd_sec_sess_destroy(void *dev __rte_unused,
2241 struct rte_security_session *sess)
2242{
2243 void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
2244
2245 if (sess_priv) {
2246 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
2247 }
2248 return 0;
2249}
2250
/** Get the size of a DOCSIS security session's private data. */
static unsigned int
aesni_mb_pmd_sec_sess_get_size(void *device __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}

/** Get the security capabilities of the AESNI-MB PMD (DOCSIS). */
static const struct rte_security_capability *
aesni_mb_pmd_sec_capa_get(void *device __rte_unused)
{
	return aesni_mb_pmd_security_cap;
}
2263
/* Security operations for the DOCSIS protocol; unsupported hooks are NULL. */
static struct rte_security_ops aesni_mb_pmd_sec_ops = {
	.session_create = aesni_mb_pmd_sec_sess_create,
	.session_update = NULL,
	.session_get_size = aesni_mb_pmd_sec_sess_get_size,
	.session_stats_get = NULL,
	.session_destroy = aesni_mb_pmd_sec_sess_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = aesni_mb_pmd_sec_capa_get
};

/* Exported so the ipsec_mb framework can register the security ops. */
struct rte_security_ops *rte_aesni_mb_pmd_sec_ops = &aesni_mb_pmd_sec_ops;
2275
2276static int
2277aesni_mb_configure_dev(struct rte_cryptodev *dev)
2278{
2279 struct rte_security_ctx *security_instance;
2280
2281 security_instance = rte_malloc("aesni_mb_sec",
2282 sizeof(struct rte_security_ctx),
2283 RTE_CACHE_LINE_SIZE);
2284 if (security_instance != NULL) {
2285 security_instance->device = (void *)dev;
2286 security_instance->ops = rte_aesni_mb_pmd_sec_ops;
2287 security_instance->sess_cnt = 0;
2288 dev->security_ctx = security_instance;
2289
2290 return 0;
2291 }
2292
2293 return -ENOMEM;
2294}
2295
2296#endif
2297
/** Vdev probe: create an AESNI-MB device via the common ipsec_mb path. */
static int
aesni_mb_probe(struct rte_vdev_device *vdev)
{
	return ipsec_mb_create(vdev, IPSEC_MB_PMD_TYPE_AESNI_MB);
}
2303
/* Vdev driver entry points for the AESNI-MB crypto PMD. */
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
	.probe = aesni_mb_probe,
	.remove = ipsec_mb_remove
};

static struct cryptodev_driver aesni_mb_crypto_drv;

/* Register the vdev driver, its legacy alias, the accepted devargs and
 * the crypto driver id used to bind sessions to this PMD.
 */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD,
	cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
	"max_nb_queue_pairs=<int> socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(
	aesni_mb_crypto_drv,
	cryptodev_aesni_mb_pmd_drv.driver,
	pmd_driver_id_aesni_mb);
2320
2321
/* Constructor: register this PMD's capabilities, entry points and feature
 * flags with the shared ipsec_mb framework.
 */
RTE_INIT(ipsec_mb_register_aesni_mb)
{
	struct ipsec_mb_internals *aesni_mb_data =
		&ipsec_mb_pmds[IPSEC_MB_PMD_TYPE_AESNI_MB];

	aesni_mb_data->caps = aesni_mb_capabilities;
	aesni_mb_data->dequeue_burst = aesni_mb_dequeue_burst;
	aesni_mb_data->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
			RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
			RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
			RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;

	aesni_mb_data->internals_priv_size = 0;
	aesni_mb_data->ops = &aesni_mb_pmd_ops;
	aesni_mb_data->qp_priv_size = sizeof(struct aesni_mb_qp_data);
	aesni_mb_data->queue_pair_configure = NULL;
#ifdef AESNI_MB_DOCSIS_SEC_ENABLED
	/* DOCSIS security support is optional at build time. */
	aesni_mb_data->security_ops = &aesni_mb_pmd_sec_ops;
	aesni_mb_data->dev_config = aesni_mb_configure_dev;
	aesni_mb_data->feature_flags |= RTE_CRYPTODEV_FF_SECURITY;
#endif
	aesni_mb_data->session_configure = aesni_mb_session_configure;
	aesni_mb_data->session_priv_size = sizeof(struct aesni_mb_session);
}
2352