#include <linux/kernel.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_ivgen.h"
#include "cc_pm.h"

/* Max. number of iterations to poll for free HW queue slots */
#define CC_MAX_POLL_ITER 10
/* Longest HW descriptor sequence built for a single request */
#define CC_MAX_DESC_SEQ_LEN 23
struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW queue depth reported by the device */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* Protects access to the HW queue registers: only one request
	 * may be pushed to the hardware at a time.
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* Backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protects the backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
	bool is_runtime_suspended;
};

/* A request parked on the backlog until the HW queue has room for it */
struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

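/* Release all request manager resources: the dummy completion DMA buffer
 * and the completion workqueue or tasklet, then free the handle itself.
 */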
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill the completion tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	memset(req_mgr_h, 0, sizeof(struct cc_req_mgr_handle));
	kfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

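/* Allocate and set up the request manager: init the locks and backlog list,
 * register the completion tasklet (or workqueue), read the HW queue size and
 * build the dummy completion descriptor used to signal request completion.
 */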
int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate the DMA word used by the "dummy" completion descriptor */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. the "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(&req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/* All six words of each descriptor are written to the same queue
	 * window register; the HW consumes them in order.
	 */
	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

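/*
 * Completion callback used for synchronous requests; it simply signals the
 * completion object that cc_send_sync_request() is waiting on.
 */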
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

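/*
 * Check that there is room for the request: one free slot in the SW queue
 * and enough free slots in the HW descriptor queue for the whole sequence.
 * Returns 0 if the request fits, -ENOSPC otherwise.
 */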
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* The SW queue is checked only once as it will not change during
	 * the poll because the spinlock_bh is held by the thread.
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in the HW queue. Poll a constant number of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* There is enough room */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

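/*
 * Enqueue a caller request to the crypto hardware.
 * Must be called with the HW lock held and the device powered up.
 *
 * If add_comp is true, the artificial completion descriptor is appended so
 * the AXI completion counter flags the end of the sequence; if ivgen is true,
 * an IV-generation sequence is fetched from the IV pool and pushed first.
 * Returns -EINPROGRESS on success (the request completes asynchronously).
 */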
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp, bool ivgen)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len;
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	if (ivgen) {
		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
			cc_req->ivgen_dma_addr_len,
			&cc_req->ivgen_dma_addr[0],
			&cc_req->ivgen_dma_addr[1],
			&cc_req->ivgen_dma_addr[2],
			cc_req->ivgen_size);

		/* Acquire IV from pool */
		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
			       cc_req->ivgen_dma_addr_len,
			       cc_req->ivgen_size, iv_seq, &iv_seq_len);

		if (rc) {
			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be done with the HW lock held */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. Issue a memory barrier to make
	 * sure there are no outstanding memory writes.
	 */
	wmb();

	/* Push the sequence(s) to the HW queue */
	if (ivgen)
		enqueue_seq(drvdata, iv_seq, iv_seq_len);

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. It may indicate a
		 * problem with resuming power. Set the free slot count to
		 * 0 and hope for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.\n");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slot count of the HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}

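/* Park a request on the backlog and kick the completion tasklet so the
 * request gets another chance to be pushed once HW queue space frees up.
 */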
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

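/*
 * Walk the backlog and try to push each parked request to the HW queue.
 * Called from the completion handler, when the HW queue may have drained.
 */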
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	struct crypto_async_request *req;
	bool ivgen;
	unsigned int total_len;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = (struct crypto_async_request *)creq->user_arg;

		/*
		 * Notify the request that we are moving it out of the
		 * backlog, but only if we haven't done so already.
		 */
		if (!bli->notif) {
			req->complete(req, -EINPROGRESS);
			bli->notif = true;
		}

		ivgen = !!creq->ivgen_dma_addr_len;
		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, total_len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for this
			 * request. Bail out. We'll return here on the
			 * next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false, ivgen);

		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list and free the item */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}

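/*
 * Entry point for asynchronous requests coming from the crypto API. Takes a
 * PM reference, checks for queue space and either pushes the sequence to the
 * HW, backlogs it (if CRYPTO_TFM_REQ_MAY_BACKLOG is set), or fails.
 */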
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	bool ivgen = !!cc_req->ivgen_dma_addr_len;
	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, total_len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
					ivgen);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

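/*
 * Send a request and block until it completes. Used for operations that
 * must finish before the caller can continue.
 */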
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_pm_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -EAGAIN) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

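/*
 * Enqueue a descriptor sequence to the crypto HW during the init process.
 * Assumes it is not called in the middle of another flow, since it sets the
 * QUEUE_LAST_IND flag on the last descriptor. Returns 0 on success.
 */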
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len;
	int rc = 0;

	/* Wait for space in the HW and SW FIFOs */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(&desc[(len - 1)]);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. Issue a memory barrier to make
	 * sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slot count of the HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

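/* Signal that the HW queue may have room again and schedule the deferred
 * completion handler.
 */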
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

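/* Dequeue completed requests from the SW queue and invoke their user
 * callbacks, one request per completion counted by the AXI monitor.
 */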
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * request queue is empty. This is not normal.
			 * Return and hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, 0);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
}

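/* Deferred service handler, run as an interrupt-fired tasklet (or as work
 * when COMP_IN_WQ is set): acknowledge the completion IRQ, drain completed
 * requests, re-enable the interrupt and retry any backlogged requests.
 */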
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	u32 irq;

	irq = (drvdata->irq & CC_COMP_IRQ_MASK);

	if (irq & CC_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it,
		 * we clear it now
		 */
		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);

		/* Avoid race with the above clear: test the completion
		 * counter once more
		 */
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(drvdata);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* Re-read the HW counter to pick up
				 * completions that arrived meanwhile
				 */
				request_mgr_handle->axi_completed =
						cc_axi_comp_count(drvdata);
			} while (request_mgr_handle->axi_completed > 0);

			cc_iowrite(drvdata, CC_REG(HOST_ICR),
				   CC_COMP_IRQ_MASK);

			request_mgr_handle->axi_completed +=
					cc_axi_comp_count(drvdata);
		}
	}
	/* After all pending completions have been handled,
	 * unmask the completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);

	cc_proc_backlog(drvdata);
}

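/*
 * Resume the request queue after runtime resume: clear the suspended flag
 * under the HW lock.
 */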
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

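/*
 * Suspend the request queue for runtime suspend: succeeds only if the SW
 * request queue is empty, otherwise returns -EBUSY.
 */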
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif