1
2
3
4
5
6
7
8#include <linux/of_irq.h>
9#include <linux/of_address.h>
10
11#include "compat.h"
12#include "ctrl.h"
13#include "regs.h"
14#include "jr.h"
15#include "desc.h"
16#include "intern.h"
17
/*
 * Driver-global state: the list of all registered job rings and the
 * spinlock protecting it. Used by caam_jr_alloc()/caam_jr_free() and
 * by probe/remove to register rings.
 */
struct jr_driver_data {
	/* List of Job Rings registered with this driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* protects jr_list */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
25
/*
 * Flush and reset one job ring.
 *
 * The hardware requires a two-step sequence: writing JRCR_RESET first
 * flushes in-flight jobs (halt), and a second JRCR_RESET performs the
 * actual reset. Interrupts are masked for the duration since completion
 * is detected by polling.
 *
 * Returns 0 on success, -EIO if either the flush or the reset phase
 * times out.
 */
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}
65
66
67
68
/*
 * Shutdown one job ring: reset the hardware, stop the completion
 * tasklet, release the IRQ and free the DMA rings plus the software
 * shadow entry array. Returns the result of the hardware reset.
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings; read ring bases back from the device registers */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
	kfree(jrp->entinfo);

	return ret;
}
93
/*
 * Platform-driver remove hook: refuse removal while the ring is still
 * allocated to a tfm user, otherwise unregister the ring from the
 * global list and shut down the hardware.
 */
static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	/*
	 * Return EBUSY if job ring already allocated.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Remove the node from Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
	irq_dispose_mapping(jrpriv->irq);

	return ret;
}
124
125
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses; a zero status means
	 * the interrupt was not ours (IRQ line may be shared).
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If JobR error, we got more development work to do
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts; the tasklet re-enables them when done */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}
162
163
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;

	/* Drain every completion the hardware has posted to the output ring */
	while (rd_reg32(&jrp->rregs->outring_used)) {

		head = READ_ONCE(jrp->head);

		spin_lock(&jrp->outlock);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		/*
		 * Find the software shadow slot matching the descriptor the
		 * hardware just completed; completions may arrive out of
		 * order relative to submission.
		 */
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jrp->outring[hw_idx].desc ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break;
		}

		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so CPU can change in it */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params before releasing the slot */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done: tell the hardware one output entry was consumed */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 * (their desc_addr_dma was zeroed above when they finished).
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		spin_unlock(&jrp->outlock);

		/* Finally, execute user's callback (outside the lock) */
		usercall(dev, userdesc, userstatus, userarg);
	}

	/* reenable / unmask IRQs (masked by the interrupt handler) */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
242
243
244
245
246
247
248
249struct device *caam_jr_alloc(void)
250{
251 struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
252 struct device *dev = ERR_PTR(-ENODEV);
253 int min_tfm_cnt = INT_MAX;
254 int tfm_cnt;
255
256 spin_lock(&driver_data.jr_alloc_lock);
257
258 if (list_empty(&driver_data.jr_list)) {
259 spin_unlock(&driver_data.jr_alloc_lock);
260 return ERR_PTR(-ENODEV);
261 }
262
263 list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
264 tfm_cnt = atomic_read(&jrpriv->tfm_count);
265 if (tfm_cnt < min_tfm_cnt) {
266 min_tfm_cnt = tfm_cnt;
267 min_jrpriv = jrpriv;
268 }
269 if (!min_tfm_cnt)
270 break;
271 }
272
273 if (min_jrpriv) {
274 atomic_inc(&min_jrpriv->tfm_count);
275 dev = min_jrpriv->dev;
276 }
277 spin_unlock(&driver_data.jr_alloc_lock);
278
279 return dev;
280}
281EXPORT_SYMBOL(caam_jr_alloc);
282
283
284
285
286
287
288void caam_jr_free(struct device *rdev)
289{
290 struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
291
292 atomic_dec(&jrpriv->tfm_count);
293}
294EXPORT_SYMBOL(caam_jr_free);
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the ring is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used (from caam_jr_alloc()).
 * @desc: points to a job descriptor to execute. The descriptor must be
 *        in a DMAable region; it is mapped DMA_TO_DEVICE here and
 *        unmapped by the dequeue tasklet on completion.
 * @cbk:  callback invoked upon completion of this request, with the
 *        form callback(dev, desc, status, areq), where @status is the
 *        untranslated status received from CAAM.
 * @areq: optional opaque pointer handed back to @cbk.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	/* descriptor length (in words) is encoded in its header word */
	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	/* reject if hardware has no input slot or software ring is full */
	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	/* record bookkeeping needed by the dequeue tasklet's callback */
	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the input ring before the ring indices are updated, so the
	 * dequeue side never observes an index pointing at stale data.
	 */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring.
	 */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
386
387
388
389
/*
 * Init the JobR hardware and software state: connect the IRQ, reset the
 * ring, allocate the DMA input/output rings and the software shadow
 * entry array, program the ring registers and interrupt coalescing.
 * On failure, unwinds via goto-cleanup and returns a negative errno.
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		goto out_kill_deq;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		goto out_free_irq;

	error = -ENOMEM;
	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
	if (!jrp->inpring)
		goto out_free_irq;

	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
	if (!jrp->outring)
		goto out_free_inpring;

	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
	if (!jrp->entinfo)
		goto out_free_outring;

	/*
	 * Mark every slot unused (non-zero) so the dequeue matching loop
	 * cannot false-hit on an empty entry (0 means "completed/free").
	 */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	return 0;

out_free_outring:
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
out_free_inpring:
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
out_free_irq:
	free_irq(jrp->irq, dev);
out_kill_deq:
	tasklet_kill(&jrp->irqtask);
	return error;
}
467
468
469
470
471
472static int caam_jr_probe(struct platform_device *pdev)
473{
474 struct device *jrdev;
475 struct device_node *nprop;
476 struct caam_job_ring __iomem *ctrl;
477 struct caam_drv_private_jr *jrpriv;
478 static int total_jobrs;
479 int error;
480
481 jrdev = &pdev->dev;
482 jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
483 if (!jrpriv)
484 return -ENOMEM;
485
486 dev_set_drvdata(jrdev, jrpriv);
487
488
489 jrpriv->ridx = total_jobrs++;
490
491 nprop = pdev->dev.of_node;
492
493
494 ctrl = of_iomap(nprop, 0);
495 if (!ctrl) {
496 dev_err(jrdev, "of_iomap() failed\n");
497 return -ENOMEM;
498 }
499
500 jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
501
502 if (sizeof(dma_addr_t) == sizeof(u64)) {
503 if (caam_dpaa2)
504 error = dma_set_mask_and_coherent(jrdev,
505 DMA_BIT_MASK(49));
506 else if (of_device_is_compatible(nprop,
507 "fsl,sec-v5.0-job-ring"))
508 error = dma_set_mask_and_coherent(jrdev,
509 DMA_BIT_MASK(40));
510 else
511 error = dma_set_mask_and_coherent(jrdev,
512 DMA_BIT_MASK(36));
513 } else {
514 error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
515 }
516 if (error) {
517 dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
518 error);
519 iounmap(ctrl);
520 return error;
521 }
522
523
524 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
525
526
527 error = caam_jr_init(jrdev);
528 if (error) {
529 irq_dispose_mapping(jrpriv->irq);
530 iounmap(ctrl);
531 return error;
532 }
533
534 jrpriv->dev = jrdev;
535 spin_lock(&driver_data.jr_alloc_lock);
536 list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
537 spin_unlock(&driver_data.jr_alloc_lock);
538
539 atomic_set(&jrpriv->tfm_count, 0);
540
541 return 0;
542}
543
/* Device tree match table: SEC v4.0-compatible job ring nodes */
static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);
554
/* Platform driver binding one instance per job ring DT node */
static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe       = caam_jr_probe,
	.remove      = caam_jr_remove,
};
563
564static int __init jr_driver_init(void)
565{
566 spin_lock_init(&driver_data.jr_alloc_lock);
567 INIT_LIST_HEAD(&driver_data.jr_list);
568 return platform_driver_register(&caam_jr_driver);
569}
570
/* Module exit: unregister the platform driver (removes all rings). */
static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
582