17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <crypto/algapi.h>
21#include <crypto/hash.h>
22#include <crypto/sha.h>
23#include <crypto/md5.h>
24#include <crypto/internal/hash.h>
25
26#include "ssi_config.h"
27#include "ssi_driver.h"
28#include "ssi_request_mgr.h"
29#include "ssi_buffer_mgr.h"
30#include "ssi_sysfs.h"
31#include "ssi_hash.h"
32#include "ssi_sram_mgr.h"
33
34#define SSI_MAX_AHASH_SEQ_LEN 12
35#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
36
37struct ssi_hash_handle {
38 ssi_sram_addr_t digest_len_sram_addr;
39 ssi_sram_addr_t larval_digest_sram_addr;
40 struct list_head hash_list;
41 struct completion init_comp;
42};
43
44static const u32 digest_len_init[] = {
45 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
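/*
 * MD5 shares its four initial state words with SHA-1 (H0..H3); they are
 * listed in reverse (word-swapped) order, matching how the other larval
 * digests below are laid out for the hash engine.
 */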
46static const u32 md5_init[] = {
47 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
48static const u32 sha1_init[] = {
49 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
50static const u32 sha224_init[] = {
51 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
52 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
53static const u32 sha256_init[] = {
54 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
55 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
56#if (DX_DEV_SHA_MAX > 256)
57static const u32 digest_len_sha512_init[] = {
58 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
59static const u64 sha384_init[] = {
60 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
61 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
62static const u64 sha512_init[] = {
63 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
64 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
65#endif
66
67static void ssi_hash_create_xcbc_setup(
68 struct ahash_request *areq,
69 struct cc_hw_desc desc[],
70 unsigned int *seq_size);
71
72static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
73 struct cc_hw_desc desc[],
74 unsigned int *seq_size);
75
76struct ssi_hash_alg {
77 struct list_head entry;
78 int hash_mode;
79 int hw_mode;
80 int inter_digestsize;
81 struct ssi_drvdata *drvdata;
82 struct ahash_alg ahash_alg;
83};
84
85struct hash_key_req_ctx {
86 u32 keylen;
87 dma_addr_t key_dma_addr;
88};
89
90
91struct ssi_hash_ctx {
92 struct ssi_drvdata *drvdata;
93
94
95
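	/* digest_buff caches the ipad-xor-key digest computed by setkey() for HMAC transforms */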
96 u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
97 u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
98
99 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
100 dma_addr_t digest_buff_dma_addr;
101
102 struct hash_key_req_ctx key_params;
103 int hash_mode;
104 int hw_mode;
105 int inter_digestsize;
106 struct completion setkey_comp;
107 bool is_hmac;
108};
109
110static void ssi_hash_create_data_desc(
111 struct ahash_req_ctx *areq_ctx,
112 struct ssi_hash_ctx *ctx,
113 unsigned int flow_mode, struct cc_hw_desc desc[],
114 bool is_not_last_data,
115 unsigned int *seq_size);
116
117static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
118{
119 if (unlikely((mode == DRV_HASH_MD5) ||
120 (mode == DRV_HASH_SHA384) ||
121 (mode == DRV_HASH_SHA512))) {
122 set_bytes_swap(desc, 1);
123 } else {
124 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
125 }
126}
127
128static int ssi_hash_map_result(struct device *dev,
129 struct ahash_req_ctx *state,
130 unsigned int digestsize)
131{
132 state->digest_result_dma_addr =
133 dma_map_single(dev, (void *)state->digest_result_buff,
134 digestsize,
135 DMA_BIDIRECTIONAL);
136 if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
137 SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
138 digestsize);
139 return -ENOMEM;
140 }
	SSI_LOG_DEBUG("Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		      digestsize, state->digest_result_buff,
		      state->digest_result_dma_addr);
145
146 return 0;
147}
148
149static int ssi_hash_map_request(struct device *dev,
150 struct ahash_req_ctx *state,
151 struct ssi_hash_ctx *ctx)
152{
153 bool is_hmac = ctx->is_hmac;
154 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
155 ctx->drvdata, ctx->hash_mode);
156 struct ssi_crypto_req ssi_req = {};
157 struct cc_hw_desc desc;
158 int rc = -ENOMEM;
159
160 state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
161 if (!state->buff0) {
162 SSI_LOG_ERR("Allocating buff0 in context failed\n");
163 goto fail0;
164 }
165 state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
166 if (!state->buff1) {
167 SSI_LOG_ERR("Allocating buff1 in context failed\n");
168 goto fail_buff0;
169 }
170 state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
171 if (!state->digest_result_buff) {
172 SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
173 goto fail_buff1;
174 }
175 state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
176 if (!state->digest_buff) {
177 SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
178 goto fail_digest_result_buff;
179 }
180
	SSI_LOG_DEBUG("Allocated digest-buffer in context state->digest_buff=@%p\n", state->digest_buff);
182 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
183 state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
184 if (!state->digest_bytes_len) {
185 SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
186 goto fail1;
187 }
		SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n", state->digest_bytes_len);
189 } else {
190 state->digest_bytes_len = NULL;
191 }
192
193 state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
194 if (!state->opad_digest_buff) {
195 SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
196 goto fail2;
197 }
	SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n", state->opad_digest_buff);
199
200 state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
201 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
202 SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
203 ctx->inter_digestsize, state->digest_buff);
204 goto fail3;
205 }
206 SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
207 ctx->inter_digestsize, state->digest_buff,
208 state->digest_buff_dma_addr);
209
210 if (is_hmac) {
211 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
212 if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
213 memset(state->digest_buff, 0, ctx->inter_digestsize);
214 } else {
215 memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
216#if (DX_DEV_SHA_MAX > 256)
217 if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
218 memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
219 else
220 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
221#else
222 memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
223#endif
224 }
225 dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
226
227 if (ctx->hash_mode != DRV_HASH_NULL) {
228 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
229 memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
230 }
231 } else {
232
233
234
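		/* Plain hash flow: copy the initial (larval) digest from SRAM
		 * into the request's digest buffer.
		 */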
235 hw_desc_init(&desc);
236 set_din_sram(&desc, larval_digest_addr, ctx->inter_digestsize);
237 set_dout_dlli(&desc, state->digest_buff_dma_addr,
238 ctx->inter_digestsize, NS_BIT, 0);
239 set_flow_mode(&desc, BYPASS);
240
241 rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
242 if (unlikely(rc != 0)) {
243 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
244 goto fail4;
245 }
246 }
247
248 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
249 state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
250 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
251 SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
252 HASH_LEN_SIZE, state->digest_bytes_len);
253 goto fail4;
254 }
255 SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
256 HASH_LEN_SIZE, state->digest_bytes_len,
257 state->digest_bytes_len_dma_addr);
258 } else {
259 state->digest_bytes_len_dma_addr = 0;
260 }
261
262 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
263 state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
264 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
265 SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
266 ctx->inter_digestsize,
267 state->opad_digest_buff);
268 goto fail5;
269 }
270 SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
271 ctx->inter_digestsize, state->opad_digest_buff,
272 state->opad_digest_dma_addr);
273 } else {
274 state->opad_digest_dma_addr = 0;
275 }
276 state->buff0_cnt = 0;
277 state->buff1_cnt = 0;
278 state->buff_index = 0;
279 state->mlli_params.curr_pool = NULL;
280
281 return 0;
282
283fail5:
284 if (state->digest_bytes_len_dma_addr != 0) {
285 dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
286 state->digest_bytes_len_dma_addr = 0;
287 }
288fail4:
289 if (state->digest_buff_dma_addr != 0) {
290 dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
291 state->digest_buff_dma_addr = 0;
292 }
293fail3:
294 kfree(state->opad_digest_buff);
295fail2:
296 kfree(state->digest_bytes_len);
297fail1:
298 kfree(state->digest_buff);
299fail_digest_result_buff:
300 kfree(state->digest_result_buff);
301 state->digest_result_buff = NULL;
302fail_buff1:
303 kfree(state->buff1);
304 state->buff1 = NULL;
305fail_buff0:
306 kfree(state->buff0);
307 state->buff0 = NULL;
308fail0:
309 return rc;
310}
311
312static void ssi_hash_unmap_request(struct device *dev,
313 struct ahash_req_ctx *state,
314 struct ssi_hash_ctx *ctx)
315{
316 if (state->digest_buff_dma_addr != 0) {
317 dma_unmap_single(dev, state->digest_buff_dma_addr,
318 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
319 SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
320 state->digest_buff_dma_addr);
321 state->digest_buff_dma_addr = 0;
322 }
323 if (state->digest_bytes_len_dma_addr != 0) {
324 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
325 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
326 SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
327 state->digest_bytes_len_dma_addr);
328 state->digest_bytes_len_dma_addr = 0;
329 }
330 if (state->opad_digest_dma_addr != 0) {
331 dma_unmap_single(dev, state->opad_digest_dma_addr,
332 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
333 SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
334 state->opad_digest_dma_addr);
335 state->opad_digest_dma_addr = 0;
336 }
337
338 kfree(state->opad_digest_buff);
339 kfree(state->digest_bytes_len);
340 kfree(state->digest_buff);
341 kfree(state->digest_result_buff);
342 kfree(state->buff1);
343 kfree(state->buff0);
344}
345
346static void ssi_hash_unmap_result(struct device *dev,
347 struct ahash_req_ctx *state,
348 unsigned int digestsize, u8 *result)
349{
350 if (state->digest_result_dma_addr != 0) {
351 dma_unmap_single(dev,
352 state->digest_result_dma_addr,
353 digestsize,
354 DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped digest result buffer: va (%pK) pa (%pad) len %u\n",
357 state->digest_result_buff,
358 state->digest_result_dma_addr,
359 digestsize);
360 memcpy(result,
361 state->digest_result_buff,
362 digestsize);
363 }
364 state->digest_result_dma_addr = 0;
365}
366
367static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
368{
369 struct ahash_request *req = (struct ahash_request *)ssi_req;
370 struct ahash_req_ctx *state = ahash_request_ctx(req);
371
372 SSI_LOG_DEBUG("req=%pK\n", req);
373
374 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
375 req->base.complete(&req->base, 0);
376}
377
378static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
379{
380 struct ahash_request *req = (struct ahash_request *)ssi_req;
381 struct ahash_req_ctx *state = ahash_request_ctx(req);
382 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
383 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
384 u32 digestsize = crypto_ahash_digestsize(tfm);
385
386 SSI_LOG_DEBUG("req=%pK\n", req);
387
388 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
389 ssi_hash_unmap_result(dev, state, digestsize, req->result);
390 ssi_hash_unmap_request(dev, state, ctx);
391 req->base.complete(&req->base, 0);
392}
393
394static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
395{
396 struct ahash_request *req = (struct ahash_request *)ssi_req;
397 struct ahash_req_ctx *state = ahash_request_ctx(req);
398 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
399 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
400 u32 digestsize = crypto_ahash_digestsize(tfm);
401
402 SSI_LOG_DEBUG("req=%pK\n", req);
403
404 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
405 ssi_hash_unmap_result(dev, state, digestsize, req->result);
406 ssi_hash_unmap_request(dev, state, ctx);
407 req->base.complete(&req->base, 0);
408}
409
410static int ssi_hash_digest(struct ahash_req_ctx *state,
411 struct ssi_hash_ctx *ctx,
412 unsigned int digestsize,
413 struct scatterlist *src,
414 unsigned int nbytes, u8 *result,
415 void *async_req)
416{
417 struct device *dev = &ctx->drvdata->plat_dev->dev;
418 bool is_hmac = ctx->is_hmac;
419 struct ssi_crypto_req ssi_req = {};
420 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
421 ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
422 ctx->drvdata, ctx->hash_mode);
423 int idx = 0;
424 int rc = 0;
425
426 SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
427
428 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
429 SSI_LOG_ERR("map_ahash_source() failed\n");
430 return -ENOMEM;
431 }
432
433 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
434 SSI_LOG_ERR("map_ahash_digest() failed\n");
435 return -ENOMEM;
436 }
437
438 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
439 SSI_LOG_ERR("map_ahash_request_final() failed\n");
440 return -ENOMEM;
441 }
442
443 if (async_req) {
444
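		/* Set up the completion callback for the async request */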
445 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
446 ssi_req.user_arg = (void *)async_req;
447 }
448
449
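	/* If HMAC, load the ipad-xor-key digest; otherwise load the initial (larval) digest from SRAM */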
450 hw_desc_init(&desc[idx]);
451 set_cipher_mode(&desc[idx], ctx->hw_mode);
452 if (is_hmac) {
453 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
454 ctx->inter_digestsize, NS_BIT);
455 } else {
456 set_din_sram(&desc[idx], larval_digest_addr,
457 ctx->inter_digestsize);
458 }
459 set_flow_mode(&desc[idx], S_DIN_to_HASH);
460 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
461 idx++;
462
463
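	/* Load the hash current length */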
464 hw_desc_init(&desc[idx]);
465 set_cipher_mode(&desc[idx], ctx->hw_mode);
466
467 if (is_hmac) {
468 set_din_type(&desc[idx], DMA_DLLI,
469 state->digest_bytes_len_dma_addr, HASH_LEN_SIZE,
470 NS_BIT);
471 } else {
472 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
473 if (likely(nbytes != 0))
474 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
475 else
476 set_cipher_do(&desc[idx], DO_PAD);
477 }
478 set_flow_mode(&desc[idx], S_DIN_to_HASH);
479 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
480 idx++;
481
482 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
483
484 if (is_hmac) {
485
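		/* Write the hash current length back with HW padding (DO_PAD) applied */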
486 hw_desc_init(&desc[idx]);
487 set_cipher_mode(&desc[idx], ctx->hw_mode);
488 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
489 HASH_LEN_SIZE, NS_BIT, 0);
490 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
491 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
492 set_cipher_do(&desc[idx], DO_PAD);
493 idx++;
494
495
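		/* Store the inner hash digest result in the context */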
496 hw_desc_init(&desc[idx]);
497 set_cipher_mode(&desc[idx], ctx->hw_mode);
498 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
499 digestsize, NS_BIT, 0);
500 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
501 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
502 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
503 idx++;
504
505
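		/* Load the opad-xor-key digest to start the outer hash */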
506 hw_desc_init(&desc[idx]);
507 set_cipher_mode(&desc[idx], ctx->hw_mode);
508 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
509 ctx->inter_digestsize, NS_BIT);
510 set_flow_mode(&desc[idx], S_DIN_to_HASH);
511 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
512 idx++;
513
514
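		/* Load the initial digest length for the outer hash */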
515 hw_desc_init(&desc[idx]);
516 set_cipher_mode(&desc[idx], ctx->hw_mode);
517 set_din_sram(&desc[idx],
518 ssi_ahash_get_initial_digest_len_sram_addr(
519ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
520 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
521 set_flow_mode(&desc[idx], S_DIN_to_HASH);
522 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
523 idx++;
524
525
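		/* Memory barrier: wait for the digest writes to complete */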
526 hw_desc_init(&desc[idx]);
527 set_din_no_dma(&desc[idx], 0, 0xfffff0);
528 set_dout_no_dma(&desc[idx], 0, 0, 1);
529 idx++;
530
531
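		/* Perform the outer HASH update over the inner digest */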
532 hw_desc_init(&desc[idx]);
533 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
534 digestsize, NS_BIT);
535 set_flow_mode(&desc[idx], DIN_HASH);
536 idx++;
537 }
538
539
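	/* Get the final hash/MAC result */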
540 hw_desc_init(&desc[idx]);
541 set_cipher_mode(&desc[idx], ctx->hw_mode);
542
543 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
544 NS_BIT, (async_req ? 1 : 0));
545 if (async_req)
546 set_queue_last_ind(&desc[idx]);
547 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
548 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
549 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
550 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
551 idx++;
552
553 if (async_req) {
554 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
555 if (unlikely(rc != -EINPROGRESS)) {
556 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
557 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
558 ssi_hash_unmap_result(dev, state, digestsize, result);
559 ssi_hash_unmap_request(dev, state, ctx);
560 }
561 } else {
562 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
563 if (rc != 0) {
564 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
565 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
566 } else {
567 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
568 }
569 ssi_hash_unmap_result(dev, state, digestsize, result);
570 ssi_hash_unmap_request(dev, state, ctx);
571 }
572 return rc;
573}
574
575static int ssi_hash_update(struct ahash_req_ctx *state,
576 struct ssi_hash_ctx *ctx,
577 unsigned int block_size,
578 struct scatterlist *src,
579 unsigned int nbytes,
580 void *async_req)
581{
582 struct device *dev = &ctx->drvdata->plat_dev->dev;
583 struct ssi_crypto_req ssi_req = {};
584 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
585 u32 idx = 0;
586 int rc;
587
588 SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
589 "hmac" : "hash", nbytes);
590
591 if (nbytes == 0) {
592
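		/* No data to update; nothing to do */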
593 return 0;
594 }
595
596 rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
597 if (unlikely(rc)) {
598 if (rc == 1) {
			SSI_LOG_DEBUG("Data size does not require HW update %x\n",
				      nbytes);
601
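			/* Data is buffered; no HW update is required yet */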
602 return 0;
603 }
604 SSI_LOG_ERR("map_ahash_request_update() failed\n");
605 return -ENOMEM;
606 }
607
608 if (async_req) {
609
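		/* Set up the completion callback for the async request */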
610 ssi_req.user_cb = (void *)ssi_hash_update_complete;
611 ssi_req.user_arg = async_req;
612 }
613
614
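	/* Restore the hash digest from the context */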
615 hw_desc_init(&desc[idx]);
616 set_cipher_mode(&desc[idx], ctx->hw_mode);
617 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
618 ctx->inter_digestsize, NS_BIT);
619 set_flow_mode(&desc[idx], S_DIN_to_HASH);
620 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
621 idx++;
622
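	/* Restore the hash current length */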
623 hw_desc_init(&desc[idx]);
624 set_cipher_mode(&desc[idx], ctx->hw_mode);
625 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
626 HASH_LEN_SIZE, NS_BIT);
627 set_flow_mode(&desc[idx], S_DIN_to_HASH);
628 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
629 idx++;
630
631 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
632
633
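	/* Store the updated hash digest back in the context */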
634 hw_desc_init(&desc[idx]);
635 set_cipher_mode(&desc[idx], ctx->hw_mode);
636 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
637 ctx->inter_digestsize, NS_BIT, 0);
638 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
639 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
640 idx++;
641
642
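	/* Store the updated hash current length back in the context */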
643 hw_desc_init(&desc[idx]);
644 set_cipher_mode(&desc[idx], ctx->hw_mode);
645 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
646 HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
647 if (async_req)
648 set_queue_last_ind(&desc[idx]);
649 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
650 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
651 idx++;
652
653 if (async_req) {
654 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
655 if (unlikely(rc != -EINPROGRESS)) {
656 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
657 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
658 }
659 } else {
660 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
661 if (rc != 0) {
662 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
663 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
664 } else {
665 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
666 }
667 }
668 return rc;
669}
670
671static int ssi_hash_finup(struct ahash_req_ctx *state,
672 struct ssi_hash_ctx *ctx,
673 unsigned int digestsize,
674 struct scatterlist *src,
675 unsigned int nbytes,
676 u8 *result,
677 void *async_req)
678{
679 struct device *dev = &ctx->drvdata->plat_dev->dev;
680 bool is_hmac = ctx->is_hmac;
681 struct ssi_crypto_req ssi_req = {};
682 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
683 int idx = 0;
684 int rc;
685
686 SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
687
688 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
689 SSI_LOG_ERR("map_ahash_request_final() failed\n");
690 return -ENOMEM;
691 }
692 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
693 SSI_LOG_ERR("map_ahash_digest() failed\n");
694 return -ENOMEM;
695 }
696
697 if (async_req) {
698
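		/* Set up the completion callback for the async request */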
699 ssi_req.user_cb = (void *)ssi_hash_complete;
700 ssi_req.user_arg = async_req;
701 }
702
703
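	/* Restore the hash digest from the context */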
704 hw_desc_init(&desc[idx]);
705 set_cipher_mode(&desc[idx], ctx->hw_mode);
706 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
707 ctx->inter_digestsize, NS_BIT);
708 set_flow_mode(&desc[idx], S_DIN_to_HASH);
709 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
710 idx++;
711
712
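	/* Restore the hash current length */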
713 hw_desc_init(&desc[idx]);
714 set_cipher_mode(&desc[idx], ctx->hw_mode);
715 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
716 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
717 HASH_LEN_SIZE, NS_BIT);
718 set_flow_mode(&desc[idx], S_DIN_to_HASH);
719 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
720 idx++;
721
722 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
723
724 if (is_hmac) {
725
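		/* Store the inner hash digest result in the context */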
726 hw_desc_init(&desc[idx]);
727 set_cipher_mode(&desc[idx], ctx->hw_mode);
728 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
729 digestsize, NS_BIT, 0);
730 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
731 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
732 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
733 idx++;
734
735
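		/* Load the opad-xor-key digest to start the outer hash */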
736 hw_desc_init(&desc[idx]);
737 set_cipher_mode(&desc[idx], ctx->hw_mode);
738 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
739 ctx->inter_digestsize, NS_BIT);
740 set_flow_mode(&desc[idx], S_DIN_to_HASH);
741 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
742 idx++;
743
744
745 hw_desc_init(&desc[idx]);
746 set_cipher_mode(&desc[idx], ctx->hw_mode);
747 set_din_sram(&desc[idx],
748 ssi_ahash_get_initial_digest_len_sram_addr(
749ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
750 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
751 set_flow_mode(&desc[idx], S_DIN_to_HASH);
752 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
753 idx++;
754
755
756 hw_desc_init(&desc[idx]);
757 set_din_no_dma(&desc[idx], 0, 0xfffff0);
758 set_dout_no_dma(&desc[idx], 0, 0, 1);
759 idx++;
760
761
762 hw_desc_init(&desc[idx]);
763 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
764 digestsize, NS_BIT);
765 set_flow_mode(&desc[idx], DIN_HASH);
766 idx++;
767 }
768
769
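	/* Get the final hash/MAC result */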
770 hw_desc_init(&desc[idx]);
771
772 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
773 NS_BIT, (async_req ? 1 : 0));
774 if (async_req)
775 set_queue_last_ind(&desc[idx]);
776 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
777 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
778 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
779 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
780 set_cipher_mode(&desc[idx], ctx->hw_mode);
781 idx++;
782
783 if (async_req) {
784 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
785 if (unlikely(rc != -EINPROGRESS)) {
786 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
787 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
788 ssi_hash_unmap_result(dev, state, digestsize, result);
789 }
790 } else {
791 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
792 if (rc != 0) {
793 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
794 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
795 ssi_hash_unmap_result(dev, state, digestsize, result);
796 } else {
797 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
798 ssi_hash_unmap_result(dev, state, digestsize, result);
799 ssi_hash_unmap_request(dev, state, ctx);
800 }
801 }
802 return rc;
803}
804
805static int ssi_hash_final(struct ahash_req_ctx *state,
806 struct ssi_hash_ctx *ctx,
807 unsigned int digestsize,
808 struct scatterlist *src,
809 unsigned int nbytes,
810 u8 *result,
811 void *async_req)
812{
813 struct device *dev = &ctx->drvdata->plat_dev->dev;
814 bool is_hmac = ctx->is_hmac;
815 struct ssi_crypto_req ssi_req = {};
816 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
817 int idx = 0;
818 int rc;
819
820 SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
821
822 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
823 SSI_LOG_ERR("map_ahash_request_final() failed\n");
824 return -ENOMEM;
825 }
826
827 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
828 SSI_LOG_ERR("map_ahash_digest() failed\n");
829 return -ENOMEM;
830 }
831
832 if (async_req) {
833
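		/* Set up the completion callback for the async request */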
834 ssi_req.user_cb = (void *)ssi_hash_complete;
835 ssi_req.user_arg = async_req;
836 }
837
838
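	/* Restore the hash digest from the context */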
839 hw_desc_init(&desc[idx]);
840 set_cipher_mode(&desc[idx], ctx->hw_mode);
841 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
842 ctx->inter_digestsize, NS_BIT);
843 set_flow_mode(&desc[idx], S_DIN_to_HASH);
844 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
845 idx++;
846
847
848 hw_desc_init(&desc[idx]);
849 set_cipher_mode(&desc[idx], ctx->hw_mode);
850 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
851 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
852 HASH_LEN_SIZE, NS_BIT);
853 set_flow_mode(&desc[idx], S_DIN_to_HASH);
854 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
855 idx++;
856
857 ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
858
859
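	/* Pad the hash: write the padded current length back (DO_PAD) */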
860 hw_desc_init(&desc[idx]);
861 set_cipher_do(&desc[idx], DO_PAD);
862 set_cipher_mode(&desc[idx], ctx->hw_mode);
863 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
864 HASH_LEN_SIZE, NS_BIT, 0);
865 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
866 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
867 idx++;
868
869 if (is_hmac) {
870
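		/* Store the inner hash digest result in the context */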
871 hw_desc_init(&desc[idx]);
872 set_cipher_mode(&desc[idx], ctx->hw_mode);
873 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
874 digestsize, NS_BIT, 0);
875 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
876 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
877 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
878 idx++;
879
880
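		/* Load the opad-xor-key digest to start the outer hash */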
881 hw_desc_init(&desc[idx]);
882 set_cipher_mode(&desc[idx], ctx->hw_mode);
883 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
884 ctx->inter_digestsize, NS_BIT);
885 set_flow_mode(&desc[idx], S_DIN_to_HASH);
886 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
887 idx++;
888
889
890 hw_desc_init(&desc[idx]);
891 set_cipher_mode(&desc[idx], ctx->hw_mode);
892 set_din_sram(&desc[idx],
893 ssi_ahash_get_initial_digest_len_sram_addr(
894ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
895 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
896 set_flow_mode(&desc[idx], S_DIN_to_HASH);
897 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
898 idx++;
899
900
901 hw_desc_init(&desc[idx]);
902 set_din_no_dma(&desc[idx], 0, 0xfffff0);
903 set_dout_no_dma(&desc[idx], 0, 0, 1);
904 idx++;
905
906
907 hw_desc_init(&desc[idx]);
908 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
909 digestsize, NS_BIT);
910 set_flow_mode(&desc[idx], DIN_HASH);
911 idx++;
912 }
913
914
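	/* Get the final hash/MAC result */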
915 hw_desc_init(&desc[idx]);
916 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
917 NS_BIT, (async_req ? 1 : 0));
918 if (async_req)
919 set_queue_last_ind(&desc[idx]);
920 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
921 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
922 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
923 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
924 set_cipher_mode(&desc[idx], ctx->hw_mode);
925 idx++;
926
927 if (async_req) {
928 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
929 if (unlikely(rc != -EINPROGRESS)) {
930 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
931 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
932 ssi_hash_unmap_result(dev, state, digestsize, result);
933 }
934 } else {
935 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
936 if (rc != 0) {
937 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
938 ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
939 ssi_hash_unmap_result(dev, state, digestsize, result);
940 } else {
941 ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
942 ssi_hash_unmap_result(dev, state, digestsize, result);
943 ssi_hash_unmap_request(dev, state, ctx);
944 }
945 }
946 return rc;
947}
948
949static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
950{
951 struct device *dev = &ctx->drvdata->plat_dev->dev;
952
953 state->xcbc_count = 0;
954
	return ssi_hash_map_request(dev, state, ctx);
958}
959
960static int ssi_hash_setkey(void *hash,
961 const u8 *key,
962 unsigned int keylen,
963 bool synchronize)
964{
965 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
966 struct ssi_crypto_req ssi_req = {};
967 struct ssi_hash_ctx *ctx = NULL;
968 int blocksize = 0;
969 int digestsize = 0;
970 int i, idx = 0, rc = 0;
971 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
972 ssi_sram_addr_t larval_addr;
973
	SSI_LOG_DEBUG("start keylen: %d\n", keylen);
975
976 ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
977 blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
978 digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
979
980 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
981 ctx->drvdata, ctx->hash_mode);
982
983
984
985
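	/* keylen == 0 is valid for HMAC: the pad buffer is simply zero-filled below */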
986 ctx->key_params.keylen = keylen;
987 ctx->key_params.key_dma_addr = 0;
988 ctx->is_hmac = true;
989
990 if (keylen != 0) {
991 ctx->key_params.key_dma_addr = dma_map_single(
992 &ctx->drvdata->plat_dev->dev,
993 (void *)key,
994 keylen, DMA_TO_DEVICE);
995 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
996 ctx->key_params.key_dma_addr))) {
			SSI_LOG_ERR("Mapping key va=0x%p len=%u for DMA failed\n",
				    key, keylen);
999 return -ENOMEM;
1000 }
		SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			      ctx->key_params.key_dma_addr,
			      ctx->key_params.keylen);
1004
1005 if (keylen > blocksize) {
1006
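			/* Key is longer than the block size: hash it down first. Load the initial (larval) digest */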
1007 hw_desc_init(&desc[idx]);
1008 set_cipher_mode(&desc[idx], ctx->hw_mode);
1009 set_din_sram(&desc[idx], larval_addr,
1010 ctx->inter_digestsize);
1011 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1012 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1013 idx++;
1014
1015
1016 hw_desc_init(&desc[idx]);
1017 set_cipher_mode(&desc[idx], ctx->hw_mode);
1018 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1019 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1020 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1021 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1022 idx++;
1023
1024 hw_desc_init(&desc[idx]);
1025 set_din_type(&desc[idx], DMA_DLLI,
1026 ctx->key_params.key_dma_addr, keylen,
1027 NS_BIT);
1028 set_flow_mode(&desc[idx], DIN_HASH);
1029 idx++;
1030
1031
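			/* Write out the hashed key, then zero-pad it up to the block size */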
1032 hw_desc_init(&desc[idx]);
1033 set_cipher_mode(&desc[idx], ctx->hw_mode);
1034 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1035 digestsize, NS_BIT, 0);
1036 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1037 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1038 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
1039 ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
1040 idx++;
1041
1042 hw_desc_init(&desc[idx]);
1043 set_din_const(&desc[idx], 0, (blocksize - digestsize));
1044 set_flow_mode(&desc[idx], BYPASS);
1045 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1046 digestsize),
1047 (blocksize - digestsize), NS_BIT, 0);
1048 idx++;
1049 } else {
1050 hw_desc_init(&desc[idx]);
1051 set_din_type(&desc[idx], DMA_DLLI,
1052 ctx->key_params.key_dma_addr, keylen,
1053 NS_BIT);
1054 set_flow_mode(&desc[idx], BYPASS);
1055 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1056 keylen, NS_BIT, 0);
1057 idx++;
1058
1059 if ((blocksize - keylen) != 0) {
1060 hw_desc_init(&desc[idx]);
1061 set_din_const(&desc[idx], 0,
1062 (blocksize - keylen));
1063 set_flow_mode(&desc[idx], BYPASS);
1064 set_dout_dlli(&desc[idx],
1065 (ctx->opad_tmp_keys_dma_addr +
1066 keylen), (blocksize - keylen),
1067 NS_BIT, 0);
1068 idx++;
1069 }
1070 }
1071 } else {
1072 hw_desc_init(&desc[idx]);
1073 set_din_const(&desc[idx], 0, blocksize);
1074 set_flow_mode(&desc[idx], BYPASS);
1075 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
1076 blocksize, NS_BIT, 0);
1077 idx++;
1078 }
1079
1080 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1081 if (unlikely(rc != 0)) {
1082 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1083 goto out;
1084 }
1085
1086
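	/* Derive the ipad-xor-key and opad-xor-key values */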
1087 for (idx = 0, i = 0; i < 2; i++) {
1088
1089 hw_desc_init(&desc[idx]);
1090 set_cipher_mode(&desc[idx], ctx->hw_mode);
1091 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
1092 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1093 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1094 idx++;
1095
1096
1097 hw_desc_init(&desc[idx]);
1098 set_cipher_mode(&desc[idx], ctx->hw_mode);
1099 set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
1100 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1101 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1102 idx++;
1103
1104
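		/* Prepare the ipad/opad constant */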
1105 hw_desc_init(&desc[idx]);
1106 set_xor_val(&desc[idx], hmac_pad_const[i]);
1107 set_cipher_mode(&desc[idx], ctx->hw_mode);
1108 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1109 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1110 idx++;
1111
1112
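		/* Hash the key XORed with the pad constant */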
1113 hw_desc_init(&desc[idx]);
1114 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
1115 blocksize, NS_BIT);
1116 set_cipher_mode(&desc[idx], ctx->hw_mode);
1117 set_xor_active(&desc[idx]);
1118 set_flow_mode(&desc[idx], DIN_HASH);
1119 idx++;
1120
1121
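		/* First pass stores the ipad digest in digest_buff, second pass the opad digest in opad_tmp_keys */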
1122 hw_desc_init(&desc[idx]);
1123 set_cipher_mode(&desc[idx], ctx->hw_mode);
1124 if (i > 0)
1125 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
1126 ctx->inter_digestsize, NS_BIT, 0);
1127 else
1128 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
1129 ctx->inter_digestsize, NS_BIT, 0);
1130 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1131 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1132 idx++;
1133 }
1134
1135 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1136
1137out:
1138 if (rc)
1139 crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1140
1141 if (ctx->key_params.key_dma_addr) {
1142 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1143 ctx->key_params.key_dma_addr,
1144 ctx->key_params.keylen, DMA_TO_DEVICE);
1145 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1146 ctx->key_params.key_dma_addr,
1147 ctx->key_params.keylen);
1148 }
1149 return rc;
1150}
1151
1152static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
1153 const u8 *key, unsigned int keylen)
1154{
1155 struct ssi_crypto_req ssi_req = {};
1156 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1157 int idx = 0, rc = 0;
1158 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1159
1160 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1161
1162 switch (keylen) {
1163 case AES_KEYSIZE_128:
1164 case AES_KEYSIZE_192:
1165 case AES_KEYSIZE_256:
1166 break;
1167 default:
1168 return -EINVAL;
1169 }
1170
1171 ctx->key_params.keylen = keylen;
1172
1173 ctx->key_params.key_dma_addr = dma_map_single(
1174 &ctx->drvdata->plat_dev->dev,
1175 (void *)key,
1176 keylen, DMA_TO_DEVICE);
1177 if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
1178 ctx->key_params.key_dma_addr))) {
		SSI_LOG_ERR("Mapping key va=0x%p len=%u for DMA failed\n",
			    key, keylen);
1181 return -ENOMEM;
1182 }
	SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		      ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1187
1188 ctx->is_hmac = true;
1189
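	/* Derive XCBC K1/K2/K3: encrypt the constants 0x01../0x02../0x03.. with the user key in ECB mode */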
1190 hw_desc_init(&desc[idx]);
1191 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
1192 keylen, NS_BIT);
1193 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1194 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1195 set_key_size_aes(&desc[idx], keylen);
1196 set_flow_mode(&desc[idx], S_DIN_to_AES);
1197 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1198 idx++;
1199
1200 hw_desc_init(&desc[idx]);
1201 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
1202 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1203 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1204 XCBC_MAC_K1_OFFSET),
1205 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1206 idx++;
1207
1208 hw_desc_init(&desc[idx]);
1209 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
1210 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1211 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1212 XCBC_MAC_K2_OFFSET),
1213 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1214 idx++;
1215
1216 hw_desc_init(&desc[idx]);
1217 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
1218 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1219 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
1220 XCBC_MAC_K3_OFFSET),
1221 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1222 idx++;
1223
1224 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
1225
1226 if (rc != 0)
1227 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1228
1229 dma_unmap_single(&ctx->drvdata->plat_dev->dev,
1230 ctx->key_params.key_dma_addr,
1231 ctx->key_params.keylen, DMA_TO_DEVICE);
1232 SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1233 ctx->key_params.key_dma_addr,
1234 ctx->key_params.keylen);
1235
1236 return rc;
1237}
1238
1239#if SSI_CC_HAS_CMAC
1240static int ssi_cmac_setkey(struct crypto_ahash *ahash,
1241 const u8 *key, unsigned int keylen)
1242{
1243 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1244
1245 SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
1246
1247 ctx->is_hmac = true;
1248
1249 switch (keylen) {
1250 case AES_KEYSIZE_128:
1251 case AES_KEYSIZE_192:
1252 case AES_KEYSIZE_256:
1253 break;
1254 default:
1255 return -EINVAL;
1256 }
1257
1258 ctx->key_params.keylen = keylen;
1259
1260
1261
1262 dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
1263 ctx->opad_tmp_keys_dma_addr,
1264 keylen, DMA_TO_DEVICE);
1265
1266 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1267 if (keylen == 24)
1268 memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
1269
1270 dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
1271 ctx->opad_tmp_keys_dma_addr,
1272 keylen, DMA_TO_DEVICE);
1273
1276 return 0;
1277}
1278#endif
1279
1280static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
1281{
1282 struct device *dev = &ctx->drvdata->plat_dev->dev;
1283
1284 if (ctx->digest_buff_dma_addr != 0) {
1285 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1286 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			      ctx->digest_buff_dma_addr);
1290 ctx->digest_buff_dma_addr = 0;
1291 }
1292 if (ctx->opad_tmp_keys_dma_addr != 0) {
1293 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1294 sizeof(ctx->opad_tmp_keys_buff),
1295 DMA_BIDIRECTIONAL);
		SSI_LOG_DEBUG("Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			      ctx->opad_tmp_keys_dma_addr);
1299 ctx->opad_tmp_keys_dma_addr = 0;
1300 }
1301
1302 ctx->key_params.keylen = 0;
1303}
1304
1305static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
1306{
1307 struct device *dev = &ctx->drvdata->plat_dev->dev;
1308
1309 ctx->key_params.keylen = 0;
1310
1311 ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1312 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1313 SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
1314 sizeof(ctx->digest_buff), ctx->digest_buff);
1315 goto fail;
1316 }
1317 SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
1318 sizeof(ctx->digest_buff), ctx->digest_buff,
1319 ctx->digest_buff_dma_addr);
1320
1321 ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
1322 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1323 SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
1324 sizeof(ctx->opad_tmp_keys_buff),
1325 ctx->opad_tmp_keys_buff);
1326 goto fail;
1327 }
1328 SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1329 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1330 ctx->opad_tmp_keys_dma_addr);
1331
1332 ctx->is_hmac = false;
1333 return 0;
1334
1335fail:
1336 ssi_hash_free_ctx(ctx);
1337 return -ENOMEM;
1338}
1339
1340static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
1341{
1342 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1343 struct hash_alg_common *hash_alg_common =
1344 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1345 struct ahash_alg *ahash_alg =
1346 container_of(hash_alg_common, struct ahash_alg, halg);
1347 struct ssi_hash_alg *ssi_alg =
1348 container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
1349
1350 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1351 sizeof(struct ahash_req_ctx));
1352
1353 ctx->hash_mode = ssi_alg->hash_mode;
1354 ctx->hw_mode = ssi_alg->hw_mode;
1355 ctx->inter_digestsize = ssi_alg->inter_digestsize;
1356 ctx->drvdata = ssi_alg->drvdata;
1357
1358 return ssi_hash_alloc_ctx(ctx);
1359}
1360
1361static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
1362{
1363 struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1364
	SSI_LOG_DEBUG("ssi_hash_cra_exit\n");
1366 ssi_hash_free_ctx(ctx);
1367}
1368
1369static int ssi_mac_update(struct ahash_request *req)
1370{
1371 struct ahash_req_ctx *state = ahash_request_ctx(req);
1372 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1373 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1374 struct device *dev = &ctx->drvdata->plat_dev->dev;
1375 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1376 struct ssi_crypto_req ssi_req = {};
1377 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1378 int rc;
1379 u32 idx = 0;
1380
1381 if (req->nbytes == 0) {
1382
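		/* No data to update; nothing to do */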
1383 return 0;
1384 }
1385
1386 state->xcbc_count++;
1387
1388 rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
1389 if (unlikely(rc)) {
1390 if (rc == 1) {
			SSI_LOG_DEBUG("Data size does not require HW update %x\n",
				      req->nbytes);
1393
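			/* Data is buffered; no HW update is required yet */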
1394 return 0;
1395 }
1396 SSI_LOG_ERR("map_ahash_request_update() failed\n");
1397 return -ENOMEM;
1398 }
1399
1400 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1401 ssi_hash_create_xcbc_setup(req, desc, &idx);
1402 else
1403 ssi_hash_create_cmac_setup(req, desc, &idx);
1404
1405 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1406
1407
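	/* Store the MAC intermediate state back in the context */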
1408 hw_desc_init(&desc[idx]);
1409 set_cipher_mode(&desc[idx], ctx->hw_mode);
1410 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1411 ctx->inter_digestsize, NS_BIT, 1);
1412 set_queue_last_ind(&desc[idx]);
1413 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1414 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1415 idx++;
1416
1417
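	/* Set up the completion callback for the async request */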
1418 ssi_req.user_cb = (void *)ssi_hash_update_complete;
1419 ssi_req.user_arg = (void *)req;
1420
1421 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1422 if (unlikely(rc != -EINPROGRESS)) {
1423 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1424 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1425 }
1426 return rc;
1427}
1428
1429static int ssi_mac_final(struct ahash_request *req)
1430{
1431 struct ahash_req_ctx *state = ahash_request_ctx(req);
1432 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1433 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1434 struct device *dev = &ctx->drvdata->plat_dev->dev;
1435 struct ssi_crypto_req ssi_req = {};
1436 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1437 int idx = 0;
1438 int rc = 0;
1439 u32 key_size, key_len;
1440 u32 digestsize = crypto_ahash_digestsize(tfm);
1441
1442 u32 rem_cnt = state->buff_index ? state->buff1_cnt :
1443 state->buff0_cnt;
1444
1445 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1446 key_size = CC_AES_128_BIT_KEY_SIZE;
1447 key_len = CC_AES_128_BIT_KEY_SIZE;
1448 } else {
1449 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1450 ctx->key_params.keylen;
1451 key_len = ctx->key_params.keylen;
1452 }
1453
	SSI_LOG_DEBUG("===== final xcbc remainder (%d) ====\n", rem_cnt);
1455
1456 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
1457 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1458 return -ENOMEM;
1459 }
1460
1461 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1462 SSI_LOG_ERR("map_ahash_digest() failed\n");
1463 return -ENOMEM;
1464 }
1465
1466
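	/* Set up the completion callback for the async request */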
1467 ssi_req.user_cb = (void *)ssi_hash_complete;
1468 ssi_req.user_arg = (void *)req;
1469
1470 if (state->xcbc_count && (rem_cnt == 0)) {
1471
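		/* Load the key for ECB decryption of the saved block state */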
1472 hw_desc_init(&desc[idx]);
1473 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1474 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1475 set_din_type(&desc[idx], DMA_DLLI,
1476 (ctx->opad_tmp_keys_dma_addr +
1477 XCBC_MAC_K1_OFFSET), key_size, NS_BIT);
1478 set_key_size_aes(&desc[idx], key_len);
1479 set_flow_mode(&desc[idx], S_DIN_to_AES);
1480 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1481 idx++;
1482
1483
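		/* Decrypt the saved block state back to the previous state XOR M[n] */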
1484 hw_desc_init(&desc[idx]);
1485 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1486 CC_AES_BLOCK_SIZE, NS_BIT);
1487 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1488 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1489 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1490 idx++;
1491
1492
1493 hw_desc_init(&desc[idx]);
1494 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1495 set_dout_no_dma(&desc[idx], 0, 0, 1);
1496 idx++;
1497 }
1498
1499 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1500 ssi_hash_create_xcbc_setup(req, desc, &idx);
1501 else
1502 ssi_hash_create_cmac_setup(req, desc, &idx);
1503
1504 if (state->xcbc_count == 0) {
1505 hw_desc_init(&desc[idx]);
1506 set_cipher_mode(&desc[idx], ctx->hw_mode);
1507 set_key_size_aes(&desc[idx], key_len);
1508 set_cmac_size0_mode(&desc[idx]);
1509 set_flow_mode(&desc[idx], S_DIN_to_AES);
1510 idx++;
1511 } else if (rem_cnt > 0) {
1512 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1513 } else {
1514 hw_desc_init(&desc[idx]);
1515 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1516 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1517 idx++;
1518 }
1519
1520
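	/* Get the final MAC result */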
1521 hw_desc_init(&desc[idx]);
1522
1523 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1524 digestsize, NS_BIT, 1);
1525 set_queue_last_ind(&desc[idx]);
1526 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1527 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1528 set_cipher_mode(&desc[idx], ctx->hw_mode);
1529 idx++;
1530
1531 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1532 if (unlikely(rc != -EINPROGRESS)) {
1533 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1534 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1535 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1536 }
1537 return rc;
1538}
1539
1540static int ssi_mac_finup(struct ahash_request *req)
1541{
1542 struct ahash_req_ctx *state = ahash_request_ctx(req);
1543 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1544 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1545 struct device *dev = &ctx->drvdata->plat_dev->dev;
1546 struct ssi_crypto_req ssi_req = {};
1547 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1548 int idx = 0;
1549 int rc = 0;
1550 u32 key_len = 0;
1551 u32 digestsize = crypto_ahash_digestsize(tfm);
1552
1553 SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
1554 if (state->xcbc_count > 0 && req->nbytes == 0) {
		SSI_LOG_DEBUG("No data to update. Call to ssi_mac_final\n");
1556 return ssi_mac_final(req);
1557 }
1558
1559 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1560 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1561 return -ENOMEM;
1562 }
1563 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1564 SSI_LOG_ERR("map_ahash_digest() failed\n");
1565 return -ENOMEM;
1566 }
1567
1568
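	/* Set up the completion callback for the async request */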
1569 ssi_req.user_cb = (void *)ssi_hash_complete;
1570 ssi_req.user_arg = (void *)req;
1571
1572 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1573 key_len = CC_AES_128_BIT_KEY_SIZE;
1574 ssi_hash_create_xcbc_setup(req, desc, &idx);
1575 } else {
1576 key_len = ctx->key_params.keylen;
1577 ssi_hash_create_cmac_setup(req, desc, &idx);
1578 }
1579
1580 if (req->nbytes == 0) {
1581 hw_desc_init(&desc[idx]);
1582 set_cipher_mode(&desc[idx], ctx->hw_mode);
1583 set_key_size_aes(&desc[idx], key_len);
1584 set_cmac_size0_mode(&desc[idx]);
1585 set_flow_mode(&desc[idx], S_DIN_to_AES);
1586 idx++;
1587 } else {
1588 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1589 }
1590
1591
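	/* Get the final MAC result */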
1592 hw_desc_init(&desc[idx]);
1593
1594 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1595 digestsize, NS_BIT, 1);
1596 set_queue_last_ind(&desc[idx]);
1597 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1598 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1599 set_cipher_mode(&desc[idx], ctx->hw_mode);
1600 idx++;
1601
1602 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1603 if (unlikely(rc != -EINPROGRESS)) {
1604 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1605 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1606 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1607 }
1608 return rc;
1609}
1610
1611static int ssi_mac_digest(struct ahash_request *req)
1612{
1613 struct ahash_req_ctx *state = ahash_request_ctx(req);
1614 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1615 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1616 struct device *dev = &ctx->drvdata->plat_dev->dev;
1617 u32 digestsize = crypto_ahash_digestsize(tfm);
1618 struct ssi_crypto_req ssi_req = {};
1619 struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
1620 u32 key_len;
1621 int idx = 0;
1622 int rc;
1623
	SSI_LOG_DEBUG("===== mac-digest (%d) ====\n", req->nbytes);
1625
1626 if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
1627 SSI_LOG_ERR("map_ahash_source() failed\n");
1628 return -ENOMEM;
1629 }
1630 if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
1631 SSI_LOG_ERR("map_ahash_digest() failed\n");
1632 return -ENOMEM;
1633 }
1634
1635 if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
1636 SSI_LOG_ERR("map_ahash_request_final() failed\n");
1637 return -ENOMEM;
1638 }
1639
1640
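	/* Set up the completion callback for the async request */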
1641 ssi_req.user_cb = (void *)ssi_hash_digest_complete;
1642 ssi_req.user_arg = (void *)req;
1643
1644 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1645 key_len = CC_AES_128_BIT_KEY_SIZE;
1646 ssi_hash_create_xcbc_setup(req, desc, &idx);
1647 } else {
1648 key_len = ctx->key_params.keylen;
1649 ssi_hash_create_cmac_setup(req, desc, &idx);
1650 }
1651
1652 if (req->nbytes == 0) {
1653 hw_desc_init(&desc[idx]);
1654 set_cipher_mode(&desc[idx], ctx->hw_mode);
1655 set_key_size_aes(&desc[idx], key_len);
1656 set_cmac_size0_mode(&desc[idx]);
1657 set_flow_mode(&desc[idx], S_DIN_to_AES);
1658 idx++;
1659 } else {
1660 ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1661 }
1662
1663
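	/* Get the final MAC result */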
1664 hw_desc_init(&desc[idx]);
1665 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1666 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1667 set_queue_last_ind(&desc[idx]);
1668 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1669 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1670 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1671 set_cipher_mode(&desc[idx], ctx->hw_mode);
1672 idx++;
1673
1674 rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
1675 if (unlikely(rc != -EINPROGRESS)) {
1676 SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
1677 ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
1678 ssi_hash_unmap_result(dev, state, digestsize, req->result);
1679 ssi_hash_unmap_request(dev, state, ctx);
1680 }
1681 return rc;
1682}
1683
1684
1685static int ssi_ahash_digest(struct ahash_request *req)
1686{
1687 struct ahash_req_ctx *state = ahash_request_ctx(req);
1688 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1689 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1690 u32 digestsize = crypto_ahash_digestsize(tfm);
1691
1692 return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1693}
1694
1695static int ssi_ahash_update(struct ahash_request *req)
1696{
1697 struct ahash_req_ctx *state = ahash_request_ctx(req);
1698 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1699 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1700 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1701
1702 return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
1703}
1704
1705static int ssi_ahash_finup(struct ahash_request *req)
1706{
1707 struct ahash_req_ctx *state = ahash_request_ctx(req);
1708 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1709 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1710 u32 digestsize = crypto_ahash_digestsize(tfm);
1711
1712 return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1713}
1714
1715static int ssi_ahash_final(struct ahash_request *req)
1716{
1717 struct ahash_req_ctx *state = ahash_request_ctx(req);
1718 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1719 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1720 u32 digestsize = crypto_ahash_digestsize(tfm);
1721
1722 return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
1723}
1724
1725static int ssi_ahash_init(struct ahash_request *req)
1726{
1727 struct ahash_req_ctx *state = ahash_request_ctx(req);
1728 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1729 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1730
1731 SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
1732
1733 return ssi_hash_init(state, ctx);
1734}
1735
1736static int ssi_ahash_export(struct ahash_request *req, void *out)
1737{
1738 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1739 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1740 struct device *dev = &ctx->drvdata->plat_dev->dev;
1741 struct ahash_req_ctx *state = ahash_request_ctx(req);
1742 u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
1743 u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
1744 state->buff0_cnt;
1745 const u32 tmp = CC_EXPORT_MAGIC;
1746
1747 memcpy(out, &tmp, sizeof(u32));
1748 out += sizeof(u32);
1749
1750 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1751 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1752 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1753 out += ctx->inter_digestsize;
1754
1755 if (state->digest_bytes_len_dma_addr) {
1756 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1757 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1758 memcpy(out, state->digest_bytes_len, HASH_LEN_SIZE);
1759 } else {
		/* No length state is mapped (MAC operation): poison the exported field */
1761 memset(out, 0x5F, HASH_LEN_SIZE);
1762 }
1763 out += HASH_LEN_SIZE;
1764
1765 memcpy(out, &curr_buff_cnt, sizeof(u32));
1766 out += sizeof(u32);
1767
1768 memcpy(out, curr_buff, curr_buff_cnt);

	/*
	 * No dma_sync_single_for_device() is needed: the export path only
	 * reads the state, it does not modify the DMA-mapped buffers.
	 */

1774 return 0;
1775}
1776
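/*
 * Import a state previously written by ssi_ahash_export(): check the magic
 * word, re-initialize the request context, then restore the intermediate
 * digest, length counter and pending buffer and sync them back to the device.
 */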
1777static int ssi_ahash_import(struct ahash_request *req, const void *in)
1778{
1779 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1780 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1781 struct device *dev = &ctx->drvdata->plat_dev->dev;
1782 struct ahash_req_ctx *state = ahash_request_ctx(req);
1783 u32 tmp;
1784 int rc;
1785
1786 memcpy(&tmp, in, sizeof(u32));
1787 if (tmp != CC_EXPORT_MAGIC) {
1788 rc = -EINVAL;
1789 goto out;
1790 }
1791 in += sizeof(u32);
1792
1793 rc = ssi_hash_init(state, ctx);
1794 if (rc)
1795 goto out;
1796
1797 dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
1798 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1799 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1800 in += ctx->inter_digestsize;
1801
1802 if (state->digest_bytes_len_dma_addr) {
1803 dma_sync_single_for_cpu(dev, state->digest_bytes_len_dma_addr,
1804 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1805 memcpy(state->digest_bytes_len, in, HASH_LEN_SIZE);
1806 }
1807 in += HASH_LEN_SIZE;
1808
1809 dma_sync_single_for_device(dev, state->digest_buff_dma_addr,
1810 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
1811
1812 if (state->digest_bytes_len_dma_addr)
1813 dma_sync_single_for_device(dev,
1814 state->digest_bytes_len_dma_addr,
1815 HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
1816
1817 state->buff_index = 0;

	/* Sanity-check the imported pending-buffer count before using it */
1820 memcpy(&tmp, in, sizeof(u32));
1821 if (tmp > SSI_MAX_HASH_BLCK_SIZE) {
1822 rc = -EINVAL;
1823 goto out;
1824 }
1825 in += sizeof(u32);
1826
1827 state->buff0_cnt = tmp;
1828 memcpy(state->buff0, in, state->buff0_cnt);
1829
1830out:
1831 return rc;
1832}
1833
1834static int ssi_ahash_setkey(struct crypto_ahash *ahash,
1835 const u8 *key, unsigned int keylen)
1836{
1837 return ssi_hash_setkey((void *)ahash, key, keylen, false);
1838}
1839
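/*
 * Template describing one algorithm supported by the engine: crypto API
 * names for the plain and keyed (HMAC / AES-MAC) variants, the ahash
 * callbacks, and the driver/hardware hash modes used when building
 * descriptors.
 */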
1840struct ssi_hash_template {
1841 char name[CRYPTO_MAX_ALG_NAME];
1842 char driver_name[CRYPTO_MAX_ALG_NAME];
1843 char mac_name[CRYPTO_MAX_ALG_NAME];
1844 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1845 unsigned int blocksize;
1846 bool synchronize;
1847 struct ahash_alg template_ahash;
1848 int hash_mode;
1849 int hw_mode;
1850 int inter_digestsize;
1851 struct ssi_drvdata *drvdata;
1852};
1853
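/*
 * Size of the exported state for a digest of _x bytes: magic word and
 * pending-buffer count (two u32 values), intermediate digest, hashed-length
 * counter and the largest possible pending data block.
 */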
1854#define CC_STATE_SIZE(_x) \
1855 ((_x) + HASH_LEN_SIZE + SSI_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1856
1857
/* Templates for the hash/HMAC and AES-MAC algorithms supported by the HW */
static struct ssi_hash_template driver_hash[] = {
1860 {
1861 .name = "sha1",
1862 .driver_name = "sha1-dx",
1863 .mac_name = "hmac(sha1)",
1864 .mac_driver_name = "hmac-sha1-dx",
1865 .blocksize = SHA1_BLOCK_SIZE,
1866 .synchronize = false,
1867 .template_ahash = {
1868 .init = ssi_ahash_init,
1869 .update = ssi_ahash_update,
1870 .final = ssi_ahash_final,
1871 .finup = ssi_ahash_finup,
1872 .digest = ssi_ahash_digest,
1873 .export = ssi_ahash_export,
1874 .import = ssi_ahash_import,
1875 .setkey = ssi_ahash_setkey,
1876 .halg = {
1877 .digestsize = SHA1_DIGEST_SIZE,
1878 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1879 },
1880 },
1881 .hash_mode = DRV_HASH_SHA1,
1882 .hw_mode = DRV_HASH_HW_SHA1,
1883 .inter_digestsize = SHA1_DIGEST_SIZE,
1884 },
1885 {
1886 .name = "sha256",
1887 .driver_name = "sha256-dx",
1888 .mac_name = "hmac(sha256)",
1889 .mac_driver_name = "hmac-sha256-dx",
1890 .blocksize = SHA256_BLOCK_SIZE,
1891 .template_ahash = {
1892 .init = ssi_ahash_init,
1893 .update = ssi_ahash_update,
1894 .final = ssi_ahash_final,
1895 .finup = ssi_ahash_finup,
1896 .digest = ssi_ahash_digest,
1897 .export = ssi_ahash_export,
1898 .import = ssi_ahash_import,
1899 .setkey = ssi_ahash_setkey,
1900 .halg = {
1901 .digestsize = SHA256_DIGEST_SIZE,
1902 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1903 },
1904 },
1905 .hash_mode = DRV_HASH_SHA256,
1906 .hw_mode = DRV_HASH_HW_SHA256,
1907 .inter_digestsize = SHA256_DIGEST_SIZE,
1908 },
1909 {
1910 .name = "sha224",
1911 .driver_name = "sha224-dx",
1912 .mac_name = "hmac(sha224)",
1913 .mac_driver_name = "hmac-sha224-dx",
1914 .blocksize = SHA224_BLOCK_SIZE,
1915 .template_ahash = {
1916 .init = ssi_ahash_init,
1917 .update = ssi_ahash_update,
1918 .final = ssi_ahash_final,
1919 .finup = ssi_ahash_finup,
1920 .digest = ssi_ahash_digest,
1921 .export = ssi_ahash_export,
1922 .import = ssi_ahash_import,
1923 .setkey = ssi_ahash_setkey,
1924 .halg = {
1925 .digestsize = SHA224_DIGEST_SIZE,
1926 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1927 },
1928 },
1929 .hash_mode = DRV_HASH_SHA224,
1930 .hw_mode = DRV_HASH_HW_SHA256,
1931 .inter_digestsize = SHA256_DIGEST_SIZE,
1932 },
1933#if (DX_DEV_SHA_MAX > 256)
1934 {
1935 .name = "sha384",
1936 .driver_name = "sha384-dx",
1937 .mac_name = "hmac(sha384)",
1938 .mac_driver_name = "hmac-sha384-dx",
1939 .blocksize = SHA384_BLOCK_SIZE,
1940 .template_ahash = {
1941 .init = ssi_ahash_init,
1942 .update = ssi_ahash_update,
1943 .final = ssi_ahash_final,
1944 .finup = ssi_ahash_finup,
1945 .digest = ssi_ahash_digest,
1946 .export = ssi_ahash_export,
1947 .import = ssi_ahash_import,
1948 .setkey = ssi_ahash_setkey,
1949 .halg = {
1950 .digestsize = SHA384_DIGEST_SIZE,
1951 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1952 },
1953 },
1954 .hash_mode = DRV_HASH_SHA384,
1955 .hw_mode = DRV_HASH_HW_SHA512,
1956 .inter_digestsize = SHA512_DIGEST_SIZE,
1957 },
1958 {
1959 .name = "sha512",
1960 .driver_name = "sha512-dx",
1961 .mac_name = "hmac(sha512)",
1962 .mac_driver_name = "hmac-sha512-dx",
1963 .blocksize = SHA512_BLOCK_SIZE,
1964 .template_ahash = {
1965 .init = ssi_ahash_init,
1966 .update = ssi_ahash_update,
1967 .final = ssi_ahash_final,
1968 .finup = ssi_ahash_finup,
1969 .digest = ssi_ahash_digest,
1970 .export = ssi_ahash_export,
1971 .import = ssi_ahash_import,
1972 .setkey = ssi_ahash_setkey,
1973 .halg = {
1974 .digestsize = SHA512_DIGEST_SIZE,
1975 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1976 },
1977 },
1978 .hash_mode = DRV_HASH_SHA512,
1979 .hw_mode = DRV_HASH_HW_SHA512,
1980 .inter_digestsize = SHA512_DIGEST_SIZE,
1981 },
1982#endif
1983 {
1984 .name = "md5",
1985 .driver_name = "md5-dx",
1986 .mac_name = "hmac(md5)",
1987 .mac_driver_name = "hmac-md5-dx",
1988 .blocksize = MD5_HMAC_BLOCK_SIZE,
1989 .template_ahash = {
1990 .init = ssi_ahash_init,
1991 .update = ssi_ahash_update,
1992 .final = ssi_ahash_final,
1993 .finup = ssi_ahash_finup,
1994 .digest = ssi_ahash_digest,
1995 .export = ssi_ahash_export,
1996 .import = ssi_ahash_import,
1997 .setkey = ssi_ahash_setkey,
1998 .halg = {
1999 .digestsize = MD5_DIGEST_SIZE,
2000 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
2001 },
2002 },
2003 .hash_mode = DRV_HASH_MD5,
2004 .hw_mode = DRV_HASH_HW_MD5,
2005 .inter_digestsize = MD5_DIGEST_SIZE,
2006 },
2007 {
2008 .mac_name = "xcbc(aes)",
2009 .mac_driver_name = "xcbc-aes-dx",
2010 .blocksize = AES_BLOCK_SIZE,
2011 .template_ahash = {
2012 .init = ssi_ahash_init,
2013 .update = ssi_mac_update,
2014 .final = ssi_mac_final,
2015 .finup = ssi_mac_finup,
2016 .digest = ssi_mac_digest,
2017 .setkey = ssi_xcbc_setkey,
2018 .export = ssi_ahash_export,
2019 .import = ssi_ahash_import,
2020 .halg = {
2021 .digestsize = AES_BLOCK_SIZE,
2022 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2023 },
2024 },
2025 .hash_mode = DRV_HASH_NULL,
2026 .hw_mode = DRV_CIPHER_XCBC_MAC,
2027 .inter_digestsize = AES_BLOCK_SIZE,
2028 },
2029#if SSI_CC_HAS_CMAC
2030 {
2031 .mac_name = "cmac(aes)",
2032 .mac_driver_name = "cmac-aes-dx",
2033 .blocksize = AES_BLOCK_SIZE,
2034 .template_ahash = {
2035 .init = ssi_ahash_init,
2036 .update = ssi_mac_update,
2037 .final = ssi_mac_final,
2038 .finup = ssi_mac_finup,
2039 .digest = ssi_mac_digest,
2040 .setkey = ssi_cmac_setkey,
2041 .export = ssi_ahash_export,
2042 .import = ssi_ahash_import,
2043 .halg = {
2044 .digestsize = AES_BLOCK_SIZE,
2045 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
2046 },
2047 },
2048 .hash_mode = DRV_HASH_NULL,
2049 .hw_mode = DRV_CIPHER_CMAC,
2050 .inter_digestsize = AES_BLOCK_SIZE,
2051 },
2052#endif
2053
2054};
2055
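/*
 * Build a struct ssi_hash_alg from a template. For the keyed variant the
 * HMAC/MAC names are used; for the plain variant the hash names are used
 * and the setkey callback is cleared.
 */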
2056static struct ssi_hash_alg *
2057ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
2058{
2059 struct ssi_hash_alg *t_crypto_alg;
2060 struct crypto_alg *alg;
2061 struct ahash_alg *halg;
2062
2063 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
2064 if (!t_crypto_alg) {
2065 SSI_LOG_ERR("failed to allocate t_crypto_alg\n");
2066 return ERR_PTR(-ENOMEM);
2067 }
2068
2069 t_crypto_alg->ahash_alg = template->template_ahash;
2070 halg = &t_crypto_alg->ahash_alg;
2071 alg = &halg->halg.base;
2072
2073 if (keyed) {
2074 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2075 template->mac_name);
2076 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2077 template->mac_driver_name);
2078 } else {
2079 halg->setkey = NULL;
2080 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2081 template->name);
2082 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2083 template->driver_name);
2084 }
2085 alg->cra_module = THIS_MODULE;
2086 alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
2087 alg->cra_priority = SSI_CRA_PRIO;
2088 alg->cra_blocksize = template->blocksize;
2089 alg->cra_alignmask = 0;
2090 alg->cra_exit = ssi_hash_cra_exit;
2091
2092 alg->cra_init = ssi_ahash_cra_init;
2093 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
2094 CRYPTO_ALG_KERN_DRIVER_ONLY;
2095 alg->cra_type = &crypto_ahash_type;
2096
2097 t_crypto_alg->hash_mode = template->hash_mode;
2098 t_crypto_alg->hw_mode = template->hw_mode;
2099 t_crypto_alg->inter_digestsize = template->inter_digestsize;
2100
2101 return t_crypto_alg;
2102}
2103
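/*
 * Load the initial digest-length constants and the larval (initial) digests
 * of all supported hash modes into SRAM so that HW descriptors can reference
 * them directly.
 */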
2104int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
2105{
2106 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2107 ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
2108 unsigned int larval_seq_len = 0;
2109 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
2110 int rc = 0;
2111#if (DX_DEV_SHA_MAX > 256)
2112 int i;
2113#endif

	/* Copy the initial digest-length constant to SRAM */
2116 ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
2117 ARRAY_SIZE(digest_len_init),
2118 larval_seq, &larval_seq_len);
2119 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2120 if (unlikely(rc != 0))
2121 goto init_digest_const_err;
2122
2123 sram_buff_ofs += sizeof(digest_len_init);
2124 larval_seq_len = 0;
2125
2126#if (DX_DEV_SHA_MAX > 256)
	/* Copy the SHA-384/512 initial digest-length constant to SRAM */
2128 ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
2129 ARRAY_SIZE(digest_len_sha512_init),
2130 larval_seq, &larval_seq_len);
2131 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2132 if (unlikely(rc != 0))
2133 goto init_digest_const_err;
2134
2135 sram_buff_ofs += sizeof(digest_len_sha512_init);
2136 larval_seq_len = 0;
2137#endif

	/* Larval (initial) digests start at this SRAM offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy the MD5/SHA-1/SHA-224/SHA-256 larval digests to SRAM */
2143 ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
2144 ARRAY_SIZE(md5_init), larval_seq,
2145 &larval_seq_len);
2146 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2147 if (unlikely(rc != 0))
2148 goto init_digest_const_err;
2149 sram_buff_ofs += sizeof(md5_init);
2150 larval_seq_len = 0;
2151
2152 ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
2153 ARRAY_SIZE(sha1_init), larval_seq,
2154 &larval_seq_len);
2155 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2156 if (unlikely(rc != 0))
2157 goto init_digest_const_err;
2158 sram_buff_ofs += sizeof(sha1_init);
2159 larval_seq_len = 0;
2160
2161 ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
2162 ARRAY_SIZE(sha224_init), larval_seq,
2163 &larval_seq_len);
2164 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2165 if (unlikely(rc != 0))
2166 goto init_digest_const_err;
2167 sram_buff_ofs += sizeof(sha224_init);
2168 larval_seq_len = 0;
2169
2170 ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
2171 ARRAY_SIZE(sha256_init), larval_seq,
2172 &larval_seq_len);
2173 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2174 if (unlikely(rc != 0))
2175 goto init_digest_const_err;
2176 sram_buff_ofs += sizeof(sha256_init);
2177 larval_seq_len = 0;
2178
2179#if (DX_DEV_SHA_MAX > 256)
	/* 64-bit larvals: write each value as two 32-bit words in swapped order */
2181 for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
2182 const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
2183 const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
2184
2185 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2186 larval_seq, &larval_seq_len);
2187 sram_buff_ofs += sizeof(u32);
2188 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2189 larval_seq, &larval_seq_len);
2190 sram_buff_ofs += sizeof(u32);
2191 }
2192 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2193 if (unlikely(rc != 0)) {
2194 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2195 goto init_digest_const_err;
2196 }
2197 larval_seq_len = 0;
2198
2199 for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
2200 const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
2201 const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
2202
2203 ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
2204 larval_seq, &larval_seq_len);
2205 sram_buff_ofs += sizeof(u32);
2206 ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
2207 larval_seq, &larval_seq_len);
2208 sram_buff_ofs += sizeof(u32);
2209 }
2210 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
2211 if (unlikely(rc != 0)) {
2212 SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
2213 goto init_digest_const_err;
2214 }
2215#endif
2216
2217init_digest_const_err:
2218 return rc;
2219}
2220
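/*
 * Allocate the hash handle, reserve SRAM for the digest constants, load them
 * and register every supported ahash algorithm (keyed and, where applicable,
 * plain variants).
 */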
2221int ssi_hash_alloc(struct ssi_drvdata *drvdata)
2222{
2223 struct ssi_hash_handle *hash_handle;
2224 ssi_sram_addr_t sram_buff;
2225 u32 sram_size_to_alloc;
2226 int rc = 0;
2227 int alg;
2228
2229 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2230 if (!hash_handle) {
2231 SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
2232 sizeof(*hash_handle));
2233 rc = -ENOMEM;
2234 goto fail;
2235 }
2236
	drvdata->hash_handle = hash_handle;
	INIT_LIST_HEAD(&hash_handle->hash_list);
2238
2239 sram_size_to_alloc = sizeof(digest_len_init) +
2240#if (DX_DEV_SHA_MAX > 256)
2241 sizeof(digest_len_sha512_init) +
2242 sizeof(sha384_init) +
2243 sizeof(sha512_init) +
2244#endif
2245 sizeof(md5_init) +
2246 sizeof(sha1_init) +
2247 sizeof(sha224_init) +
2248 sizeof(sha256_init);
2249
2250 sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
2251 if (sram_buff == NULL_SRAM_ADDR) {
2252 SSI_LOG_ERR("SRAM pool exhausted\n");
2253 rc = -ENOMEM;
2254 goto fail;
2255 }

	/* The digest-length constants sit at the start of the SRAM buffer */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Load the digest constants into SRAM before registering the algorithms */
2261 rc = ssi_hash_init_sram_digest_consts(drvdata);
2262 if (unlikely(rc != 0)) {
2263 SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
2264 goto fail;
2265 }
2266
	/* ahash registration */
2270 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2271 struct ssi_hash_alg *t_alg;
2272 int hw_mode = driver_hash[alg].hw_mode;

		/* Register the keyed (HMAC / AES-MAC) variant */
2275 t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
2276 if (IS_ERR(t_alg)) {
2277 rc = PTR_ERR(t_alg);
2278 SSI_LOG_ERR("%s alg allocation failed\n",
2279 driver_hash[alg].driver_name);
2280 goto fail;
2281 }
2282 t_alg->drvdata = drvdata;
2283
		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			SSI_LOG_ERR("%s alg registration failed\n",
				    driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		}
		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2294
2295 if ((hw_mode == DRV_CIPHER_XCBC_MAC) ||
2296 (hw_mode == DRV_CIPHER_CMAC))
2297 continue;

		/* Register the plain hash variant */
2300 t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
2301 if (IS_ERR(t_alg)) {
2302 rc = PTR_ERR(t_alg);
2303 SSI_LOG_ERR("%s alg allocation failed\n",
2304 driver_hash[alg].driver_name);
2305 goto fail;
2306 }
2307 t_alg->drvdata = drvdata;
2308
		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (unlikely(rc)) {
			SSI_LOG_ERR("%s alg registration failed\n",
				    driver_hash[alg].driver_name);
			kfree(t_alg);
			goto fail;
		}
		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2318 }
2319
2320 return 0;
2321
fail:
	/* Unregister anything that was registered and free the handle */
	ssi_hash_free(drvdata);
2325 return rc;
2326}
2327
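/* Unregister all registered ahash algorithms and free the hash handle. */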
2328int ssi_hash_free(struct ssi_drvdata *drvdata)
2329{
2330 struct ssi_hash_alg *t_hash_alg, *hash_n;
2331 struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2332
2333 if (hash_handle) {
2334 list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2335 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2336 list_del(&t_hash_alg->entry);
2337 kfree(t_hash_alg);
2338 }
2339
2340 kfree(hash_handle);
2341 drvdata->hash_handle = NULL;
2342 }
2343 return 0;
2344}
2345
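/*
 * Build the AES-XCBC-MAC setup descriptors: load the derived K1 key and the
 * K2/K3 tweak values from the context key buffer, then load the current MAC
 * state from the request context.
 */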
2346static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2347 struct cc_hw_desc desc[],
2348 unsigned int *seq_size)
2349{
2350 unsigned int idx = *seq_size;
2351 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2352 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2353 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup XCBC MAC K1 */
2356 hw_desc_init(&desc[idx]);
2357 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2358 XCBC_MAC_K1_OFFSET),
2359 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2360 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2361 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2362 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2363 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2364 set_flow_mode(&desc[idx], S_DIN_to_AES);
2365 idx++;

	/* Setup XCBC MAC K2 */
2368 hw_desc_init(&desc[idx]);
2369 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2370 XCBC_MAC_K2_OFFSET),
2371 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2372 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2373 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2374 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2375 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2376 set_flow_mode(&desc[idx], S_DIN_to_AES);
2377 idx++;

	/* Setup XCBC MAC K3 */
2380 hw_desc_init(&desc[idx]);
2381 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2382 XCBC_MAC_K3_OFFSET),
2383 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2384 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2385 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2386 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2387 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2388 set_flow_mode(&desc[idx], S_DIN_to_AES);
2389 idx++;

	/* Load the current MAC state */
2392 hw_desc_init(&desc[idx]);
2393 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2394 CC_AES_BLOCK_SIZE, NS_BIT);
2395 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2396 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2397 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2398 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2399 set_flow_mode(&desc[idx], S_DIN_to_AES);
2400 idx++;
2401 *seq_size = idx;
2402}
2403
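/*
 * Build the AES-CMAC setup descriptors: load the AES key and then the
 * current MAC state from the request context.
 */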
2404static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2405 struct cc_hw_desc desc[],
2406 unsigned int *seq_size)
2407{
2408 unsigned int idx = *seq_size;
2409 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2410 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2411 struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	/* Setup the CMAC key */
2414 hw_desc_init(&desc[idx]);
2415 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2416 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2417 ctx->key_params.keylen), NS_BIT);
2418 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2419 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2420 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2421 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2422 set_flow_mode(&desc[idx], S_DIN_to_AES);
2423 idx++;

	/* Load the current MAC state */
2426 hw_desc_init(&desc[idx]);
2427 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2428 CC_AES_BLOCK_SIZE, NS_BIT);
2429 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2430 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2431 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2432 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2433 set_flow_mode(&desc[idx], S_DIN_to_AES);
2434 idx++;
2435 *seq_size = idx;
2436}
2437
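/*
 * Queue the descriptors that feed the request data into the engine. A single
 * contiguous buffer is passed directly as a DLLI entry; scattered data is
 * described by an MLLI table that is first copied into SRAM via the BYPASS
 * flow and then processed from there.
 */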
2438static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2439 struct ssi_hash_ctx *ctx,
2440 unsigned int flow_mode,
2441 struct cc_hw_desc desc[],
2442 bool is_not_last_data,
2443 unsigned int *seq_size)
2444{
2445 unsigned int idx = *seq_size;
2446
2447 if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2448 hw_desc_init(&desc[idx]);
2449 set_din_type(&desc[idx], DMA_DLLI,
2450 sg_dma_address(areq_ctx->curr_sg),
2451 areq_ctx->curr_sg->length, NS_BIT);
2452 set_flow_mode(&desc[idx], flow_mode);
2453 idx++;
2454 } else {
2455 if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
			SSI_LOG_DEBUG("NULL mode\n");
			/* no data: nothing to build */
2458 return;
2459 }
		/* Copy the MLLI table into SRAM (BYPASS flow) */
2461 hw_desc_init(&desc[idx]);
2462 set_din_type(&desc[idx], DMA_DLLI,
2463 areq_ctx->mlli_params.mlli_dma_addr,
2464 areq_ctx->mlli_params.mlli_len, NS_BIT);
2465 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2466 areq_ctx->mlli_params.mlli_len);
2467 set_flow_mode(&desc[idx], BYPASS);
2468 idx++;

		/* Process the data referenced by the SRAM-resident MLLI table */
2470 hw_desc_init(&desc[idx]);
2471 set_din_type(&desc[idx], DMA_MLLI,
2472 ctx->drvdata->mlli_sram_addr,
2473 areq_ctx->mlli_nents, NS_BIT);
2474 set_flow_mode(&desc[idx], flow_mode);
2475 idx++;
2476 }
2477 if (is_not_last_data)
2478 set_din_not_last_indication(&desc[(idx - 1)]);
2479
2480 *seq_size = idx;
2481}
2482

/*
 * ssi_ahash_get_larval_digest_sram_addr() - return the SRAM address of the
 * larval (initial) digest for the given hash mode. The larval digests are
 * stored back to back in the order loaded by
 * ssi_hash_init_sram_digest_consts().
 */
2492ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
2493{
2494 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2495 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2496
2497 switch (mode) {
2498 case DRV_HASH_NULL:
2499 break;
	case DRV_HASH_MD5:
		return hash_handle->larval_digest_sram_addr;
	case DRV_HASH_SHA1:
		return hash_handle->larval_digest_sram_addr +
			sizeof(md5_init);
	case DRV_HASH_SHA224:
		return hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init);
	case DRV_HASH_SHA256:
		return hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init);
#if (DX_DEV_SHA_MAX > 256)
	case DRV_HASH_SHA384:
		return hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init);
	case DRV_HASH_SHA512:
		return hash_handle->larval_digest_sram_addr +
			sizeof(md5_init) +
			sizeof(sha1_init) +
			sizeof(sha224_init) +
			sizeof(sha256_init) +
			sizeof(sha384_init);
2528#endif
2529 default:
2530 SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
2531 }

	/* DRV_HASH_NULL or an invalid mode falls back to the base larval address */
2534 return hash_handle->larval_digest_sram_addr;
2535}
2536
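/*
 * Return the SRAM address of the initial digest-length constant for the
 * given hash mode; SHA-384/512 use a separate constant.
 */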
2537ssi_sram_addr_t
2538ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, u32 mode)
2539{
2540 struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2541 struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2542 ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2543
2544 switch (mode) {
2545 case DRV_HASH_SHA1:
2546 case DRV_HASH_SHA224:
2547 case DRV_HASH_SHA256:
2548 case DRV_HASH_MD5:
2549 return digest_len_addr;
2550#if (DX_DEV_SHA_MAX > 256)
2551 case DRV_HASH_SHA384:
2552 case DRV_HASH_SHA512:
2553 return digest_len_addr + sizeof(digest_len_init);
2554#endif
2555 default:
2556 return digest_len_addr;
2557 }
2558}
2559
2560