/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 */
#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES	(DESC_QI_AEAD_GIVENC_LEN + \
				 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

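/*
 * (Re)build the shared descriptors for an AEAD session: encrypt, decrypt
 * and, for geniv algorithms, givencrypt. Keys are inlined in the
 * descriptor when desc_inline_query() reports they fit, otherwise they
 * are referenced through their DMA address.
 */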
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

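/*
 * Authentication keys are used in IPAD/OPAD split form by the MDHA unit.
 * CAAM Era 6+ hardware can derive the split key from the raw key inside
 * the shared descriptor, so the raw key is simply copied; older Eras
 * generate the split key up front via gen_split_key() on the job ring.
 */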
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

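/*
 * For GCM the only per-key decision is whether the AES key still fits
 * inline once the rest of the shared descriptor is accounted for;
 * rem_bytes below is the descriptor space left over for the key, since
 * Job Descriptor and Shared Descriptor must share the h/w buffer.
 */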
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are the salt value in the
	 * nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are the salt value in the
	 * nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	*key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
	cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				     is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
					ivsize, is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[GIVENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
					  ctx->sh_desc_givenc);
		if (ret) {
			dev_err(jrdev, "driver givenc context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int des3_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				  const u8 *key, unsigned int keylen)
{
	u32 flags = crypto_ablkcipher_get_flags(ablkcipher);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(ablkcipher, flags);
		return err;
	}

	return ablkcipher_setkey(ablkcipher, key, keylen);
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts ablkcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
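
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */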
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

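/*
 * Lazily allocate the per-operation CAAM/QI driver context. This is
 * called on the fast path with 'type' known at compile time; the lock
 * only serializes the first allocation for a given operation type.
 */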
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else if (type == DECRYPT)
				desc = ctx->sh_desc_dec;
			else /* (type == GIVENCRYPT) */
				desc = ctx->sh_desc_givenc;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (likely(!IS_ERR_OR_NULL(drv_ctx)))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

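/*
 * Undo everything a request mapped for DMA: the src/dst scatterlists (a
 * shared list is mapped bidirectionally), the IV (written by the device
 * for GIVENCRYPT, read by it otherwise) and the h/w link table.
 */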
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum optype op_type, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize,
				 op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
							 DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
		   edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);

		/* verify hw auth check passed else return -EBADMSG */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

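/*
 * allocate and map the aead extended descriptor
 */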
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
	struct ablkcipher_request *req = drv_req->app_ctx;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(qidev, edesc, req);

	/* In case initial IV was generated, copy it in GIVCIPHER request */
	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
		u8 *iv;
		struct skcipher_givcrypt_request *greq;

		greq = container_of(req, struct skcipher_givcrypt_request,
				    creq);
		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
		memcpy(greq->giv, iv, ivsize);
	}

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block.
	 */
	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	ablkcipher_request_complete(req, status);
}

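/*
 * allocate and map the ablkcipher extended descriptor
 */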
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, bool encrypt)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;

	drv_ctx = get_drv_ctx(ctx, op_type);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->info, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, op_type, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->nbytes, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->nbytes, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->nbytes, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->nbytes, 0);
	}

	return edesc;
}

static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	struct qm_sg_entry *sg_table, *fd_sgt;
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
		return (struct ablkcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	}

	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += 1 + mapped_dst_nents;
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (!edesc) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = ablkcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	if (mapped_src_nents > 1)
		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
			 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	if (mapped_src_nents > 1)
		dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
				     0);
	else
		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
				 req->nbytes, 0);

	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
			     sizeof(*sg_table), ivsize + req->nbytes, 0);

	return edesc;
}

static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block, so on decryption save it before it is
	 * overwritten in case req->src == req->dst.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
					 ivsize, ivsize, 0);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return ablkcipher_crypt(req, false);
}

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

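/*
 * Template from which the ablkcipher crypto_alg entries in driver_algs[]
 * below are instantiated when the algorithms are registered.
 */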
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam-qi",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = des3_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam-qi",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam-qi",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam-qi",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

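/*
 * AEAD algorithms: the GCM family (RFC4106, RFC4543, plain GCM) followed
 * by the authenc(hmac(*),cbc(*)) combinations.
 */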
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},

	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},

	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
2223 .decrypt = aead_decrypt,
2224 .ivsize = DES3_EDE_BLOCK_SIZE,
2225 .maxauthsize = SHA384_DIGEST_SIZE,
2226 },
2227 .caam = {
2228 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2229 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2230 OP_ALG_AAI_HMAC_PRECOMP,
2231 },
2232 },
2233 {
2234 .aead = {
2235 .base = {
2236 .cra_name = "echainiv(authenc(hmac(sha384),"
2237 "cbc(des3_ede)))",
2238 .cra_driver_name = "echainiv-authenc-"
2239 "hmac-sha384-"
2240 "cbc-des3_ede-caam-qi",
2241 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2242 },
2243 .setkey = des3_aead_setkey,
2244 .setauthsize = aead_setauthsize,
2245 .encrypt = aead_encrypt,
2246 .decrypt = aead_decrypt,
2247 .ivsize = DES3_EDE_BLOCK_SIZE,
2248 .maxauthsize = SHA384_DIGEST_SIZE,
2249 },
2250 .caam = {
2251 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2252 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2253 OP_ALG_AAI_HMAC_PRECOMP,
2254 .geniv = true,
2255 }
2256 },
2257 {
2258 .aead = {
2259 .base = {
2260 .cra_name = "authenc(hmac(sha512),"
2261 "cbc(des3_ede))",
2262 .cra_driver_name = "authenc-hmac-sha512-"
2263 "cbc-des3_ede-caam-qi",
2264 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2265 },
2266 .setkey = des3_aead_setkey,
2267 .setauthsize = aead_setauthsize,
2268 .encrypt = aead_encrypt,
2269 .decrypt = aead_decrypt,
2270 .ivsize = DES3_EDE_BLOCK_SIZE,
2271 .maxauthsize = SHA512_DIGEST_SIZE,
2272 },
2273 .caam = {
2274 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2275 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2276 OP_ALG_AAI_HMAC_PRECOMP,
2277 },
2278 },
2279 {
2280 .aead = {
2281 .base = {
2282 .cra_name = "echainiv(authenc(hmac(sha512),"
2283 "cbc(des3_ede)))",
2284 .cra_driver_name = "echainiv-authenc-"
2285 "hmac-sha512-"
2286 "cbc-des3_ede-caam-qi",
2287 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2288 },
2289 .setkey = des3_aead_setkey,
2290 .setauthsize = aead_setauthsize,
2291 .encrypt = aead_encrypt,
2292 .decrypt = aead_decrypt,
2293 .ivsize = DES3_EDE_BLOCK_SIZE,
2294 .maxauthsize = SHA512_DIGEST_SIZE,
2295 },
2296 .caam = {
2297 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2298 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2299 OP_ALG_AAI_HMAC_PRECOMP,
2300 .geniv = true,
2301 }
2302 },
2303 {
2304 .aead = {
2305 .base = {
2306 .cra_name = "authenc(hmac(md5),cbc(des))",
2307 .cra_driver_name = "authenc-hmac-md5-"
2308 "cbc-des-caam-qi",
2309 .cra_blocksize = DES_BLOCK_SIZE,
2310 },
2311 .setkey = aead_setkey,
2312 .setauthsize = aead_setauthsize,
2313 .encrypt = aead_encrypt,
2314 .decrypt = aead_decrypt,
2315 .ivsize = DES_BLOCK_SIZE,
2316 .maxauthsize = MD5_DIGEST_SIZE,
2317 },
2318 .caam = {
2319 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2320 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2321 OP_ALG_AAI_HMAC_PRECOMP,
2322 },
2323 },
2324 {
2325 .aead = {
2326 .base = {
2327 .cra_name = "echainiv(authenc(hmac(md5),"
2328 "cbc(des)))",
2329 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2330 "cbc-des-caam-qi",
2331 .cra_blocksize = DES_BLOCK_SIZE,
2332 },
2333 .setkey = aead_setkey,
2334 .setauthsize = aead_setauthsize,
2335 .encrypt = aead_encrypt,
2336 .decrypt = aead_decrypt,
2337 .ivsize = DES_BLOCK_SIZE,
2338 .maxauthsize = MD5_DIGEST_SIZE,
2339 },
2340 .caam = {
2341 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2342 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2343 OP_ALG_AAI_HMAC_PRECOMP,
2344 .geniv = true,
2345 }
2346 },
2347 {
2348 .aead = {
2349 .base = {
2350 .cra_name = "authenc(hmac(sha1),cbc(des))",
2351 .cra_driver_name = "authenc-hmac-sha1-"
2352 "cbc-des-caam-qi",
2353 .cra_blocksize = DES_BLOCK_SIZE,
2354 },
2355 .setkey = aead_setkey,
2356 .setauthsize = aead_setauthsize,
2357 .encrypt = aead_encrypt,
2358 .decrypt = aead_decrypt,
2359 .ivsize = DES_BLOCK_SIZE,
2360 .maxauthsize = SHA1_DIGEST_SIZE,
2361 },
2362 .caam = {
2363 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2364 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2365 OP_ALG_AAI_HMAC_PRECOMP,
2366 },
2367 },
2368 {
2369 .aead = {
2370 .base = {
2371 .cra_name = "echainiv(authenc(hmac(sha1),"
2372 "cbc(des)))",
2373 .cra_driver_name = "echainiv-authenc-"
2374 "hmac-sha1-cbc-des-caam-qi",
2375 .cra_blocksize = DES_BLOCK_SIZE,
2376 },
2377 .setkey = aead_setkey,
2378 .setauthsize = aead_setauthsize,
2379 .encrypt = aead_encrypt,
2380 .decrypt = aead_decrypt,
2381 .ivsize = DES_BLOCK_SIZE,
2382 .maxauthsize = SHA1_DIGEST_SIZE,
2383 },
2384 .caam = {
2385 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2386 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2387 OP_ALG_AAI_HMAC_PRECOMP,
2388 .geniv = true,
2389 }
2390 },
2391 {
2392 .aead = {
2393 .base = {
2394 .cra_name = "authenc(hmac(sha224),cbc(des))",
2395 .cra_driver_name = "authenc-hmac-sha224-"
2396 "cbc-des-caam-qi",
2397 .cra_blocksize = DES_BLOCK_SIZE,
2398 },
2399 .setkey = aead_setkey,
2400 .setauthsize = aead_setauthsize,
2401 .encrypt = aead_encrypt,
2402 .decrypt = aead_decrypt,
2403 .ivsize = DES_BLOCK_SIZE,
2404 .maxauthsize = SHA224_DIGEST_SIZE,
2405 },
2406 .caam = {
2407 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2408 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2409 OP_ALG_AAI_HMAC_PRECOMP,
2410 },
2411 },
2412 {
2413 .aead = {
2414 .base = {
2415 .cra_name = "echainiv(authenc(hmac(sha224),"
2416 "cbc(des)))",
2417 .cra_driver_name = "echainiv-authenc-"
2418 "hmac-sha224-cbc-des-"
2419 "caam-qi",
2420 .cra_blocksize = DES_BLOCK_SIZE,
2421 },
2422 .setkey = aead_setkey,
2423 .setauthsize = aead_setauthsize,
2424 .encrypt = aead_encrypt,
2425 .decrypt = aead_decrypt,
2426 .ivsize = DES_BLOCK_SIZE,
2427 .maxauthsize = SHA224_DIGEST_SIZE,
2428 },
2429 .caam = {
2430 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2431 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2432 OP_ALG_AAI_HMAC_PRECOMP,
2433 .geniv = true,
2434 }
2435 },
2436 {
2437 .aead = {
2438 .base = {
2439 .cra_name = "authenc(hmac(sha256),cbc(des))",
2440 .cra_driver_name = "authenc-hmac-sha256-"
2441 "cbc-des-caam-qi",
2442 .cra_blocksize = DES_BLOCK_SIZE,
2443 },
2444 .setkey = aead_setkey,
2445 .setauthsize = aead_setauthsize,
2446 .encrypt = aead_encrypt,
2447 .decrypt = aead_decrypt,
2448 .ivsize = DES_BLOCK_SIZE,
2449 .maxauthsize = SHA256_DIGEST_SIZE,
2450 },
2451 .caam = {
2452 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2453 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2454 OP_ALG_AAI_HMAC_PRECOMP,
2455 },
2456 },
2457 {
2458 .aead = {
2459 .base = {
2460 .cra_name = "echainiv(authenc(hmac(sha256),"
2461 "cbc(des)))",
2462 .cra_driver_name = "echainiv-authenc-"
2463 "hmac-sha256-cbc-des-"
2464 "caam-qi",
2465 .cra_blocksize = DES_BLOCK_SIZE,
2466 },
2467 .setkey = aead_setkey,
2468 .setauthsize = aead_setauthsize,
2469 .encrypt = aead_encrypt,
2470 .decrypt = aead_decrypt,
2471 .ivsize = DES_BLOCK_SIZE,
2472 .maxauthsize = SHA256_DIGEST_SIZE,
2473 },
2474 .caam = {
2475 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2476 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2477 OP_ALG_AAI_HMAC_PRECOMP,
2478 .geniv = true,
2479 },
2480 },
2481 {
2482 .aead = {
2483 .base = {
2484 .cra_name = "authenc(hmac(sha384),cbc(des))",
2485 .cra_driver_name = "authenc-hmac-sha384-"
2486 "cbc-des-caam-qi",
2487 .cra_blocksize = DES_BLOCK_SIZE,
2488 },
2489 .setkey = aead_setkey,
2490 .setauthsize = aead_setauthsize,
2491 .encrypt = aead_encrypt,
2492 .decrypt = aead_decrypt,
2493 .ivsize = DES_BLOCK_SIZE,
2494 .maxauthsize = SHA384_DIGEST_SIZE,
2495 },
2496 .caam = {
2497 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2498 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2499 OP_ALG_AAI_HMAC_PRECOMP,
2500 },
2501 },
2502 {
2503 .aead = {
2504 .base = {
2505 .cra_name = "echainiv(authenc(hmac(sha384),"
2506 "cbc(des)))",
2507 .cra_driver_name = "echainiv-authenc-"
2508 "hmac-sha384-cbc-des-"
2509 "caam-qi",
2510 .cra_blocksize = DES_BLOCK_SIZE,
2511 },
2512 .setkey = aead_setkey,
2513 .setauthsize = aead_setauthsize,
2514 .encrypt = aead_encrypt,
2515 .decrypt = aead_decrypt,
2516 .ivsize = DES_BLOCK_SIZE,
2517 .maxauthsize = SHA384_DIGEST_SIZE,
2518 },
2519 .caam = {
2520 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2521 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2522 OP_ALG_AAI_HMAC_PRECOMP,
2523 .geniv = true,
2524 }
2525 },
2526 {
2527 .aead = {
2528 .base = {
2529 .cra_name = "authenc(hmac(sha512),cbc(des))",
2530 .cra_driver_name = "authenc-hmac-sha512-"
2531 "cbc-des-caam-qi",
2532 .cra_blocksize = DES_BLOCK_SIZE,
2533 },
2534 .setkey = aead_setkey,
2535 .setauthsize = aead_setauthsize,
2536 .encrypt = aead_encrypt,
2537 .decrypt = aead_decrypt,
2538 .ivsize = DES_BLOCK_SIZE,
2539 .maxauthsize = SHA512_DIGEST_SIZE,
2540 },
2541 .caam = {
2542 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2543 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2544 OP_ALG_AAI_HMAC_PRECOMP,
2545 }
2546 },
2547 {
2548 .aead = {
2549 .base = {
2550 .cra_name = "echainiv(authenc(hmac(sha512),"
2551 "cbc(des)))",
2552 .cra_driver_name = "echainiv-authenc-"
2553 "hmac-sha512-cbc-des-"
2554 "caam-qi",
2555 .cra_blocksize = DES_BLOCK_SIZE,
2556 },
2557 .setkey = aead_setkey,
2558 .setauthsize = aead_setauthsize,
2559 .encrypt = aead_encrypt,
2560 .decrypt = aead_decrypt,
2561 .ivsize = DES_BLOCK_SIZE,
2562 .maxauthsize = SHA512_DIGEST_SIZE,
2563 },
2564 .caam = {
2565 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2566 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2567 OP_ALG_AAI_HMAC_PRECOMP,
2568 .geniv = true,
2569 }
2570 },
2571};
2572
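/*
 * Pairing of a crypto_alg built from a template with its CAAM-specific
 * algorithm selectors; instances are kept on alg_list so they can be
 * unregistered and freed at module exit.
 */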
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};

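/*
 * Common per-transform setup: acquire a job ring device, DMA-map the key
 * buffer and record the class 1/2 descriptor header template values.
 */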
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}

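/* (a)blkcipher transform init - no split-key (DKP) descriptors involved */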
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, false);
}

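/*
 * AEAD transform init - authenc algorithms (those using aead_setkey)
 * rely on DKP, which writes back into the key buffer.
 */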
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam,
				alg->setkey == aead_setkey);
}

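/*
 * Common teardown: release the per-operation driver contexts, unmap the
 * key buffer and return the job ring to the JR driver.
 */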
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

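/* Thin wrappers routing the crypto API exit hooks to caam_exit_common() */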
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct list_head alg_list;

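/* Unregister everything registered at init time and free the alg list */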
static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

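/*
 * Allocate a caam_crypto_alg and populate its crypto_alg from an
 * (a)blkcipher/givcipher template; the caller owns the allocation.
 */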
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}

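/* Fill in the boilerplate fields common to all AEAD algorithms */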
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

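/*
 * Probe the CAAM controller, read out which CHA blocks (DES, AES, MD) are
 * instantiated, and register only the algorithms the hardware supports.
 */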
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");