/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY 3000

#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
				  CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

#define HASH_MSG_LEN 8
#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

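/* ahash per-session context */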
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

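/* ahash per-request state */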
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

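/* Map state->caam_ctx and append a SEQ OUT PTR command pointing to it */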
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

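/* Map the current buffer in state (if any data is buffered) and add it to the link table */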
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

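/* Map state->caam_ctx and add it to the link table */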
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	ctx->adata.key_dma = ctx->key_dma;

	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ctx->adata.key_virt = ctx->key;

	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

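/* Digest a key that is longer than the block size down to digestsize, using the hash engine */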
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

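/* set the HMAC key: pre-hash keys longer than the block size, then prepare the (split) key for the shared descriptors */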
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

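	/*
	 * If DKP is supported (era >= 6), keep the key inline and let the
	 * shared descriptor derive the split key; otherwise generate the
	 * split key here.
	 */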
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

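/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of the h/w link table
 * @src_nents: number of segments in the input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor
 * @sec4_sg: h/w link table
 */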
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	req->base.complete(&req->base, err);
}

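/*
 * Allocate an extended descriptor: the h/w job descriptor plus space for a
 * link table with sg_num entries, and initialize the job descriptor to run
 * the given shared descriptor.
 */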
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

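/* submit an update job: hash full blocks from the buffer and req->src, keeping the running state in caam_ctx */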
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

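	/*
	 * For XCBC and CMAC, if to_hash is a multiple of the block size,
	 * keep one full block back in the buffer so that final/finup
	 * always has data to operate on.
	 */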
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}

	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
			     *next_buflen, 1);

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

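/* submit an ahash final job when no update has been issued; only buffered data is hashed */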
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

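/* submit an update job descriptor when no hashing has been performed yet (init-stage state) */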
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		edesc = ahash_edesc_alloc(ctx, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}

	print_hex_dump_debug("buf@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
			     1);

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

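/* submit ahash finup when no hashing has been performed so far: buffered data plus req->src are hashed in one pass */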
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

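/* submit the first update job descriptor after init: hash full blocks from req->src and buffer the residue */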
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}

	print_hex_dump_debug("next buf@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
			     1);

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

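/* hash algorithm templates registered with the crypto API */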
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

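	/*
	 * Get a Job ring from the Job Ring driver to ensure in-order
	 * processing of crypto requests
	 */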
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;

		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
						    ARRAY_SIZE(ctx->key),
						    DMA_BIDIRECTIONAL,
						    DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (is_xcbc_aes(caam_hash->alg_type))
			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_BIDIRECTIONAL,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

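	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of the MDHA hashing block.
	 */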
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

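	/*
	 * Skip registration of any hashing algorithms if the MD block
	 * is not present.
	 */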
	if (!md_inst)
		return -ENODEV;

	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}