#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

/* Describes the set of data buffers gathered for one MLLI table build. */
struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}
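
/*
 * A request is described to the engine either as one contiguous DMA buffer
 * (DLLI) or, when the data is fragmented, through a linked-list table of
 * descriptors (MLLI) built in cc_generate_mlli() below. A minimal sketch of
 * how the type string is typically consumed when logging (the request
 * context field name is illustrative only):
 *
 *	dev_dbg(dev, "buf type = %s\n",
 *		cc_dma_buf_type(req_ctx->dma_buf_type));
 */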

/**
 * cc_copy_mac() - Copy the MAC to/from a temporary location.
 *
 * @dev: device object
 * @req: aead request object
 * @dir: copy to or from the scatterlist
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return: Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
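
/*
 * Example: for an SGL holding entries of 4K + 4K + 1K and nbytes == 9K,
 * cc_get_sgl_nents() walks all three entries and returns nents == 3 with
 * *lbytes == 1K, i.e. how much of the final entry carries request data.
 * A minimal usage sketch (variable names are illustrative only):
 *
 *	u32 last_bytes;
 *	unsigned int n = cc_get_sgl_nents(dev, req->src, nbytes, &last_bytes);
 */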

/**
 * cc_copy_sg_portion() - Copy scatterlist data, from to_skip to end,
 * to dest and vice versa.
 *
 * @dev: device object
 * @dest: pointer to copy data to
 * @sg: pointer to the copied scatterlist
 * @to_skip: offset to skip copying from
 * @end: offset to end copying at
 * @direct: select copy direction (to/from the scatterlist)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
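
/*
 * cc_copy_mac() above is the main user: it backs up (or restores) the
 * authsize trailing bytes of the payload. A hedged sketch of that call
 * pattern, assuming a 16-byte ICV at the end of assoclen + cryptlen:
 *
 *	u32 skip = req->assoclen + req->cryptlen;
 *
 *	cc_copy_sg_portion(dev, backup_mac, req->src,
 *			   skip - 16, skip, CC_SG_TO_BUF);
 */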

static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than the maximum a single LLI entry covers */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
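
/*
 * Each LLI entry is two 32-bit words (address, size) and can describe at
 * most CC_MAX_MLLI_ENTRY_SIZE bytes, so a buffer renders into
 * DIV_ROUND_UP(buff_size, CC_MAX_MLLI_ENTRY_SIZE) entries. For example,
 * assuming a 64KB per-entry limit, a contiguous 150KB buffer renders as
 * 64KB + 64KB + 22KB: three entries, six table words.
 */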

static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
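
/*
 * Typical build flow: callers first queue every fragment list with
 * cc_add_sg_entry() and only then render the whole table in one shot.
 * A hedged sketch, assuming sg_data was initialized as in the mapping
 * functions below:
 *
 *	struct buffer_array sg_data;
 *
 *	sg_data.num_of_buffers = 0;
 *	cc_add_sg_entry(dev, &sg_data, nents, sgl, len, 0, true, &mlli_nents);
 *	rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
 */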

static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	if (!nbytes) {
		*mapped_nents = 0;
		*lbytes = 0;
		*nents = 0;
		return 0;
	}

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
		return -ENOMEM;
	}

	/* dma_map_sg() returns the number of mapped entries; 0 on failure */
	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}
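
/*
 * A hedged usage sketch of cc_map_sg(): map a source SGL and fall back to
 * an MLLI table when it maps to more than one DMA segment (this mirrors
 * the cipher path below; variable names are illustrative):
 *
 *	u32 nents, last_bytes, mapped;
 *	int rc = cc_map_sg(dev, req->src, nbytes, DMA_BIDIRECTIONAL, &nents,
 *			   LLI_MAX_NUM_OF_DATA_ENTRIES, &last_bytes, &mapped);
 *
 *	if (!rc && mapped > 1)
 *		buf_type = CC_DMA_BUF_MLLI;
 */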

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
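
/*
 * The map/unmap pair above is symmetric: whatever cc_map_cipher_request()
 * mapped (IV, src/dst SGLs, MLLI pool entry) is released by
 * cc_unmap_cipher_request() with the same sizes and directions. A hedged
 * caller-side sketch (error handling elided, names illustrative):
 *
 *	rc = cc_map_cipher_request(drvdata, req_ctx, ivsize, nbytes, iv,
 *				   req->src, req->dst, flags);
 *	... queue HW descriptors and wait for completion ...
 *	cc_unmap_cipher_request(dev, req_ctx, ivsize, req->src, req->dst);
 */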

void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
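
/*
 * The ICV is "fragmented" when the SGL has more than one entry and the
 * last entry is too short to hold the whole authentication tag, i.e. the
 * tag straddles an entry boundary. Example: with authsize == 16 and a
 * final SGL entry carrying only 4 data bytes, cc_is_icv_frag(2, 16, 4)
 * is true and the driver falls back to a contiguous MAC bounce buffer.
 */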

static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented; ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms the ICV was already
				 * copied for any INPLACE-DECRYPT operation,
				 * hence we must skip this copy here.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contiguous ICV: it lies within the last entry */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];

			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented; ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contiguous ICV: it lies within the last entry */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];

			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* The ICV is within the last entry */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* Skip src entries that hold only associated data */
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check what the nents of the dst sgl that holds only data */
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case: using one table for both src and dst */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
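
/*
 * The MLLI tables are laid out back to back in SRAM: assoc first, then
 * the data tables (order depends on direction). For example, with an
 * assoc table of 2 entries and a src table of 3, the dst table in the
 * non-inplace decrypt case above starts at
 * mlli_sram_addr + (2 + 3) * LLI_ENTRY_BYTE_SIZE.
 */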

int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0;
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* cipher text size; on decrypt it excludes the ICV */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping ccm_iv0 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/* Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 * Note: IV is contig. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/* Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* Mlli support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build MLLI */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}
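
/*
 * cc_map_hash_request_final() handles the last (or only) hash chunk: any
 * bytes still sitting in the context buffer are mapped first, then the
 * remaining request data. By contrast, cc_map_hash_request_update() below
 * must also carve off the residue that does not fill a whole hash block
 * and park it for the next call.
 */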

int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to the next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}
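
/*
 * The residue math above relies on block_size being a power of two, so
 * total_in_len & (block_size - 1) equals total_in_len % block_size.
 * Worked example, assuming a 64-byte block (e.g. SHA-256): with 10
 * buffered bytes and a 150-byte update, total_in_len == 160, the residue
 * is 160 & 63 == 32 bytes saved for the next call, and
 * update_data_len == 128 bytes (two full blocks) go to the engine now.
 */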

void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case ctx->mlli_params.curr_pool is set, the MLLI params were
	 * allocated from the pool and need to be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}
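
/*
 * Each pool element is sized for a worst-case table:
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries of LLI_ENTRY_BYTE_SIZE bytes each
 * (two 32-bit LLI words per entry), so every dma_pool_alloc() in
 * cc_generate_mlli() can hold the largest request without reallocation.
 */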

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}