1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/err.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/platform_device.h>
26#include <linux/io.h>
27#include <linux/slab.h>
28#include <linux/edma.h>
29#include <linux/dma-mapping.h>
30#include <linux/of_address.h>
31#include <linux/of_device.h>
32#include <linux/of_dma.h>
33#include <linux/of_irq.h>
34#include <linux/pm_runtime.h>
35
36#include <linux/platform_data/edma.h>
37
38
39#define PARM_OPT 0x00
40#define PARM_SRC 0x04
41#define PARM_A_B_CNT 0x08
42#define PARM_DST 0x0c
43#define PARM_SRC_DST_BIDX 0x10
44#define PARM_LINK_BCNTRLD 0x14
45#define PARM_SRC_DST_CIDX 0x18
46#define PARM_CCNT 0x1c
47
48#define PARM_SIZE 0x20
49
50
51#define SH_ER 0x00
52#define SH_ECR 0x08
53#define SH_ESR 0x10
54#define SH_CER 0x18
55#define SH_EER 0x20
56#define SH_EECR 0x28
57#define SH_EESR 0x30
58#define SH_SER 0x38
59#define SH_SECR 0x40
60#define SH_IER 0x50
61#define SH_IECR 0x58
62#define SH_IESR 0x60
63#define SH_IPR 0x68
64#define SH_ICR 0x70
65#define SH_IEVAL 0x78
66#define SH_QER 0x80
67#define SH_QEER 0x84
68#define SH_QEECR 0x88
69#define SH_QEESR 0x8c
70#define SH_QSER 0x90
71#define SH_QSECR 0x94
72#define SH_SIZE 0x200
73
74
75#define EDMA_REV 0x0000
76#define EDMA_CCCFG 0x0004
77#define EDMA_QCHMAP 0x0200
78#define EDMA_DMAQNUM 0x0240
79#define EDMA_QDMAQNUM 0x0260
80#define EDMA_QUETCMAP 0x0280
81#define EDMA_QUEPRI 0x0284
82#define EDMA_EMR 0x0300
83#define EDMA_EMCR 0x0308
84#define EDMA_QEMR 0x0310
85#define EDMA_QEMCR 0x0314
86#define EDMA_CCERR 0x0318
87#define EDMA_CCERRCLR 0x031c
88#define EDMA_EEVAL 0x0320
89#define EDMA_DRAE 0x0340
90#define EDMA_QRAE 0x0380
91#define EDMA_QUEEVTENTRY 0x0400
92#define EDMA_QSTAT 0x0600
93#define EDMA_QWMTHRA 0x0620
94#define EDMA_QWMTHRB 0x0624
95#define EDMA_CCSTAT 0x0640
96
97#define EDMA_M 0x1000
98#define EDMA_ECR 0x1008
99#define EDMA_ECRH 0x100C
100#define EDMA_SHADOW0 0x2000
101#define EDMA_PARM 0x4000
102
103#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
104
105#define EDMA_DCHMAP 0x0100
106
107
108#define GET_NUM_DMACH(x) (x & 0x7)
109#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12)
110#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16)
111#define GET_NUM_REGN(x) ((x & 0x300000) >> 20)
112#define CHMAP_EXIST BIT(24)
113
114#define EDMA_MAX_DMACH 64
115#define EDMA_MAX_PARAMENTRY 512
116
117
118
/* MMIO register base for each channel controller, indexed by CC number. */
static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
120
121static inline unsigned int edma_read(unsigned ctlr, int offset)
122{
123 return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
124}
125
126static inline void edma_write(unsigned ctlr, int offset, int val)
127{
128 __raw_writel(val, edmacc_regs_base[ctlr] + offset);
129}
/* Read-modify-write: clear bits outside @and, then set bits in @or. */
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned tmp;

	tmp = edma_read(ctlr, offset);
	tmp = (tmp & and) | or;
	edma_write(ctlr, offset, tmp);
}
/* AND @and into the register at @offset. */
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	edma_write(ctlr, offset, edma_read(ctlr, offset) & and);
}
/* OR @or into the register at @offset. */
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	edma_write(ctlr, offset, edma_read(ctlr, offset) | or);
}
/* Read element @i of a register array (4-byte stride). */
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + 4 * i);
}
/* Write element @i of a register array (4-byte stride). */
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + 4 * i, val);
}
/* Read-modify-write element @i of a register array (4-byte stride). */
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + 4 * i, and, or);
}
/* OR @or into element @i of a register array (4-byte stride). */
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + 4 * i, or);
}
/* OR @or into a two-dimensional register array: row @i, column @j. */
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + 4 * (2 * i + j), or);
}
/* Write a two-dimensional register array: row @i, column @j. */
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + 4 * (2 * i + j), val);
}
178static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
179{
180 return edma_read(ctlr, EDMA_SHADOW0 + offset);
181}
182static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
183 int i)
184{
185 return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
186}
187static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
188{
189 edma_write(ctlr, EDMA_SHADOW0 + offset, val);
190}
191static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
192 unsigned val)
193{
194 edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
195}
/* Read field @offset of PaRAM set @param_no. */
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, PARM_OFFSET(param_no) + offset);
}
/* Write field @offset of PaRAM set @param_no. */
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, PARM_OFFSET(param_no) + offset, val);
}
/* Read-modify-write field @offset of PaRAM set @param_no. */
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, PARM_OFFSET(param_no) + offset, and, or);
}
/* AND @and into field @offset of PaRAM set @param_no. */
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, PARM_OFFSET(param_no) + offset, and);
}
/* OR @or into field @offset of PaRAM set @param_no. */
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, PARM_OFFSET(param_no) + offset, or);
}
221
/* Set @len consecutive bits of @p starting at @offset. */
static inline void set_bits(int offset, int len, unsigned long *p)
{
	int i;

	for (i = offset; i < offset + len; i++)
		set_bit(i, p);
}
227
/* Clear @len consecutive bits of @p starting at @offset. */
static inline void clear_bits(int offset, int len, unsigned long *p)
{
	int i;

	for (i = offset; i < offset + len; i++)
		clear_bit(i, p);
}
233
234
235
236
/* Per-channel-controller (CC) bookkeeping state. */
struct edma {
	/* how many dma resources of each type */
	unsigned num_channels;
	unsigned num_region;
	unsigned num_slots;
	unsigned num_tc;
	enum dma_event_q default_queue;

	/* presumably channels with no hardware event trigger; not
	 * referenced in this part of the file — TODO confirm usage */
	const s8 *noevent;

	struct edma_soc_info *info;

	/*
	 * One bit per PaRAM slot (and hence per DMA channel, since the
	 * first num_channels slots are channel-mapped); set while the
	 * slot/channel is allocated.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/*
	 * One bit per DMA channel; set means no device on this platform
	 * claims the channel's hardware event, so it may be manually
	 * triggered (see edma_start() and prepare_unused_channel_list()).
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	/* interrupt number range owned by this CC, used by irq2ctlr() */
	unsigned irq_res_start;
	unsigned irq_res_end;

	/* per-channel completion/error callback and cookie */
	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};
270
/* Per-controller state; entries are NULL for controllers not present. */
static struct edma *edma_cc[EDMA_MAX_CC];
/* Number of channel controllers registered — presumably set at probe time. */
static int arch_num_cc;
273
274
/*
 * Dummy PaRAM set: a null transfer whose link field is 0xffff (end of
 * linked-list) with ccnt of 1.  Copied into PaRAM to park freed or
 * newly allocated channels/slots.
 */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
279
/* DT match table; also used to recognize eDMA phandles in client nodes. */
static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};
284
285
286
/* Route DMA channel @ch_no's events to event queue @queue_no. */
static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	/* each DMAQNUM register holds eight 4-bit queue-number fields */
	int bit = (ch_no & 0x7) * 4;

	/* default queue is resolved per controller */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}
300
/* Program the 3-bit priority field for event queue @queue_no in QUEPRI. */
static void assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	/* one 4-bit field per queue; only the low 3 bits are used */
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}
308
309
310
311
312
313
314
315
316
317
318
319
/*
 * Program an identity channel-to-PaRAM mapping: DMA channel i uses
 * PaRAM set i.  The (i << 5) shift places the PaRAM number in the
 * DCHMAP PAENTRY field (consistent with PARM_OFFSET()'s 32-byte sets).
 */
static void map_dmach_param(unsigned ctlr)
{
	int i;
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
}
326
/*
 * Install (or, with a NULL @callback, remove) the completion callback
 * for channel @lch and enable/disable its completion interrupt.
 */
static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		/* disable this channel's completion interrupt */
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		/* acknowledge any stale pending interrupt, then enable */
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}
351
352static int irq2ctlr(int irq)
353{
354 if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
355 return 0;
356 else if (irq >= edma_cc[1]->irq_res_start &&
357 irq <= edma_cc[1]->irq_res_end)
358 return 1;
359
360 return -1;
361}
362
363
364
365
366
367
/******************************************************************************
 *
 * DMA completion interrupt handler: dispatch per-channel callbacks for
 * every enabled channel whose interrupt is pending.
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_irq_handler\n");

	/* pick whichever 32-channel bank has a pending interrupt */
	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);

		/* service pending bits lowest-first */
		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* acknowledge the interrupt before the callback */
			edma_shadow0_write_array(ctlr, SH_ICR, bank,
					BIT(slot));
			if (edma_cc[ctlr]->intr_data[channel].callback)
				edma_cc[ctlr]->intr_data[channel].callback(
					channel, EDMA_DMA_COMPLETE,
					edma_cc[ctlr]->intr_data[channel].data);
		}
	} while (sh_ipr);

	/* re-evaluate: latch any interrupts that arrived meanwhile */
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
417
418
419
420
421
422
/******************************************************************************
 *
 * DMA error interrupt handler: clear event-missed (EMR), QDMA
 * event-missed (QEMR) and CC error (CCERR) conditions, invoking the
 * channel callback with EDMA_DMA_CC_ERROR for missed DMA events.
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	int ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);
	if (ctlr < 0)
		return IRQ_NONE;

	dev_dbg(data, "dma_ccerr_handler\n");

	/* nothing pending in any error register: not ours */
	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		/* handle missed DMA events first, bank 0 then bank 1 */
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
						callback) {
						edma_cc[ctlr]->intr_data[k].
						callback(k,
						EDMA_DMA_CC_ERROR,
						edma_cc[ctlr]->intr_data
						[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
								BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		/* stop once everything is acknowledged ... */
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		/* ... or give up after a bounded number of passes */
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
510
/*
 * Claim @num_slots contiguous PaRAM slots starting at/after @start_slot.
 *
 * @id selects the policy: EDMA_CONT_PARAMS_FIXED_EXACT fails unless the
 * run starts exactly at @start_slot; the other policies keep scanning
 * after a collision.  Slots speculatively claimed for runs that did not
 * pan out are tracked in @tmp_inuse and released before returning.
 *
 * Returns the CTLR+slot id of the first slot of the run, or -EBUSY.
 */
static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out parameter RAM slots, i.e we do find a set
	 * of contiguous parameter RAM slots but do not find the exact number
	 * requested as we may reach the total number of parameter RAM slots
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	j = start_slot;
	for_each_set_bit_from(j, tmp_inuse, stop_slot)
		clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	/* park every claimed slot on a dummy transfer */
	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}
566
/*
 * bus_for_each_dev() callback: clear from the "unused" bitmap every DMA
 * channel that some platform/DT device claims, leaving set only the
 * channels with no event user (those may be manually triggered).
 */
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, count, ctlr;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		/* DT: walk the device's "dmas" phandles */
		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;
		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			/* only entries pointing at an eDMA node count */
			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  edma_cc[0]->edma_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case, they have to provide their channel no */
	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}
607
608
609
/* Set once the unused-channel bitmap is built; see edma_alloc_channel(). */
static bool unused_chan_list_done;
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
/**
 * edma_alloc_channel - allocate a DMA channel (and its paired PaRAM set)
 * @channel: specific channel to allocate, or negative for "any unmapped
 *	channel" on any controller
 * @callback: optional; invoked on DMA completion or errors
 * @data: passed to @callback
 * @eventq_no: an EVENTQ_* constant selecting the event queue (and hence
 *	transfer controller) for this channel; EVENTQ_DEFAULT picks the
 *	controller's default queue
 *
 * The channel's PaRAM set is initialized to a parked dummy transfer and
 * the channel is stopped before returning.
 *
 * Returns the combined CTLR+channel id, else a negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Lazily build the bitmap of channels no device claims,
		 * by scanning every platform device's DMA resources.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		/* scan all controllers for an unused, unclaimed channel */
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
715EXPORT_SYMBOL(edma_alloc_channel);
716
717
718
719
720
721
722
723
724
725
726
727
728
729void edma_free_channel(unsigned channel)
730{
731 unsigned ctlr;
732
733 ctlr = EDMA_CTLR(channel);
734 channel = EDMA_CHAN_SLOT(channel);
735
736 if (channel >= edma_cc[ctlr]->num_channels)
737 return;
738
739 setup_dma_interrupt(channel, NULL, NULL);
740
741
742 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
743 &dummy_paramset, PARM_SIZE);
744 clear_bit(channel, edma_cc[ctlr]->edma_inuse);
745}
746EXPORT_SYMBOL(edma_free_channel);
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
/**
 * edma_alloc_slot - allocate a PaRAM slot not tied to a channel
 * @ctlr: controller to allocate from
 * @slot: specific slot to claim, or negative for "any free slot"
 *
 * Slots below num_channels are reserved for channels and cannot be
 * allocated here.  The slot is initialized to a parked dummy transfer.
 *
 * Returns the combined CTLR+slot id, else a negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (!edma_cc[ctlr])
		return -EINVAL;

	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		/* search only above the channel-mapped slots */
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
792EXPORT_SYMBOL(edma_alloc_slot);
793
794
795
796
797
798
799
800
801
/**
 * edma_free_slot - release a slot allocated by edma_alloc_slot()
 * @slot: CTLR+slot id
 *
 * Parks the slot on a dummy transfer and clears its in-use bit.
 * Channel-mapped slots (below num_channels) are rejected.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
817EXPORT_SYMBOL(edma_free_slot);
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
846{
847
848
849
850
851
852 if ((id != EDMA_CONT_PARAMS_ANY) &&
853 (slot < edma_cc[ctlr]->num_channels ||
854 slot >= edma_cc[ctlr]->num_slots))
855 return -EINVAL;
856
857
858
859
860
861
862 if (count < 1 || count >
863 (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
864 return -EINVAL;
865
866 switch (id) {
867 case EDMA_CONT_PARAMS_ANY:
868 return reserve_contiguous_slots(ctlr, id, count,
869 edma_cc[ctlr]->num_channels);
870 case EDMA_CONT_PARAMS_FIXED_EXACT:
871 case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
872 return reserve_contiguous_slots(ctlr, id, count, slot);
873 default:
874 return -EINVAL;
875 }
876
877}
878EXPORT_SYMBOL(edma_alloc_cont_slots);
879
880
881
882
883
884
885
886
887
888
889
890
891
892
/**
 * edma_free_cont_slots - release slots from edma_alloc_cont_slots()
 * @slot: CTLR+slot id of the first slot
 * @count: number of contiguous slots to release
 *
 * Each slot is parked on a dummy transfer and its in-use bit cleared.
 * Returns 0 on success, -EINVAL for a bad range.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		/* NOTE(review): re-deriving ctlr from the bare index looks
		 * redundant (i has no controller bits) — preserved as-is */
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
917EXPORT_SYMBOL(edma_free_cont_slots);
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
/**
 * edma_set_src - set initial DMA source address in PaRAM slot
 * @slot: CTLR+slot id
 * @src_port: physical address of source
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; selects the FIFO width
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
959EXPORT_SYMBOL(edma_set_src);
960
961
962
963
964
965
966
967
968
969
970
971
/**
 * edma_set_dest - set initial DMA destination address in PaRAM slot
 * @slot: CTLR+slot id
 * @dest_port: physical address of destination
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; selects the FIFO width
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				 enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
996EXPORT_SYMBOL(edma_set_dest);
997
998
999
1000
1001
1002
1003
1004
1005dma_addr_t edma_get_position(unsigned slot, bool dst)
1006{
1007 u32 offs, ctlr = EDMA_CTLR(slot);
1008
1009 slot = EDMA_CHAN_SLOT(slot);
1010
1011 offs = PARM_OFFSET(slot);
1012 offs += dst ? PARM_DST : PARM_SRC;
1013
1014 return edma_read(ctlr, offs);
1015}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: CTLR+slot id
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Only the low halfword of each PaRAM register is written (the
 * destination indexes live in the upper halfword).
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
1041EXPORT_SYMBOL(edma_set_src_index);
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: CTLR+slot id
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Only the high halfword of each PaRAM register is written (the
 * source indexes live in the lower halfword).
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
1067EXPORT_SYMBOL(edma_set_dest_index);
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: CTLR+slot id
 * @acnt: bytes per array
 * @bcnt: arrays per frame
 * @ccnt: frames per block
 * @bcnt_rld: reload value for BCNT, stored in LINK_BCNTRLD's upper half
 * @sync_mode: ASYNC (A-synchronized) or AB-synchronized transfers
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		/* SYNCDIM clear selects A-sync, set selects AB-sync */
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);

		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
1119EXPORT_SYMBOL(edma_set_transfer_params);
1120
1121
1122
1123
1124
1125
1126
1127
/**
 * edma_link - link one PaRAM slot to another
 * @from: CTLR+slot id whose LINK field is updated
 * @to: CTLR+slot id that will follow @from
 *
 * Writes @to's PaRAM byte offset into @from's link halfword, so the
 * controller reloads @from from @to when @from completes.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
1144EXPORT_SYMBOL(edma_link);
1145
1146
1147
1148
1149
1150
1151
1152
1153void edma_unlink(unsigned from)
1154{
1155 unsigned ctlr;
1156
1157 ctlr = EDMA_CTLR(from);
1158 from = EDMA_CHAN_SLOT(from);
1159
1160 if (from >= edma_cc[ctlr]->num_slots)
1161 return;
1162 edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1163}
1164EXPORT_SYMBOL(edma_unlink);
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
/**
 * edma_write_slot - copy a whole parameter set into a PaRAM slot
 * @slot: CTLR+slot id
 * @param: source parameter RAM image
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
1192EXPORT_SYMBOL(edma_write_slot);
1193
1194
1195
1196
1197
1198
1199
1200
1201
/**
 * edma_read_slot - copy a whole parameter set out of a PaRAM slot
 * @slot: CTLR+slot id
 * @param: destination parameter RAM image
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
1214EXPORT_SYMBOL(edma_read_slot);
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * edma_pause - pause dma on a channel
 * @channel: CTLR+channel id
 *
 * Disables event delivery (EECR) without clearing anything else, so the
 * transfer can later be resumed exactly where it stopped.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
1240EXPORT_SYMBOL(edma_pause);
1241
1242
1243
1244
1245
1246
1247
/**
 * edma_resume - resume dma on a channel
 * @channel: CTLR+channel id
 *
 * Re-enables event delivery (EESR) for a channel paused by edma_pause().
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
1261EXPORT_SYMBOL(edma_resume);
1262
1263int edma_trigger_channel(unsigned channel)
1264{
1265 unsigned ctlr;
1266 unsigned int mask;
1267
1268 ctlr = EDMA_CTLR(channel);
1269 channel = EDMA_CHAN_SLOT(channel);
1270 mask = BIT(channel & 0x1f);
1271
1272 edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);
1273
1274 pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
1275 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
1276 return 0;
1277}
1278EXPORT_SYMBOL(edma_trigger_channel);
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
/**
 * edma_start - start dma on a channel
 * @channel: CTLR+channel id
 *
 * Channels with no hardware event user are software-triggered via ESR;
 * event-driven channels get their stale events and error state cleared
 * and event delivery enabled.
 *
 * Returns 0, or -EINVAL for an out-of-range channel.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
1326EXPORT_SYMBOL(edma_start);
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
/**
 * edma_stop - stop dma on a channel
 * @channel: CTLR+channel id
 *
 * Disables event delivery, clears any pending event and secondary/
 * missed-event state so the channel is left completely quiescent.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
1361EXPORT_SYMBOL(edma_stop);
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
/**
 * edma_clean_channel - clean a channel's error state
 * @channel: CTLR+channel id
 *
 * Clears pending events, missed-event and secondary-event bits, and the
 * controller-level error flags, typically after an error callback.
 */
void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
1397EXPORT_SYMBOL(edma_clean_channel);
1398
1399
1400
1401
1402
1403
/**
 * edma_clear_event - clear a pending event on a channel
 * @channel: CTLR+channel id
 *
 * Writes the global ECR/ECRH register (not the shadow region), ECRH
 * covering channels 32 and above.
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
1418EXPORT_SYMBOL(edma_clear_event);
1419
1420
1421
1422
1423
1424
1425
1426
1427
/**
 * edma_assign_channel_eventq - move a channel to another event queue
 * @channel: CTLR+channel id
 * @eventq_no: EVENTQ_* constant; EVENTQ_DEFAULT selects the
 *	controller's default queue
 */
void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	/* default queue is resolved, then validated against num_tc */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = edma_cc[ctlr]->default_queue;
	if (eventq_no >= edma_cc[ctlr]->num_tc)
		return;

	map_dmach_queue(ctlr, channel, eventq_no);
}
1446EXPORT_SYMBOL(edma_assign_channel_eventq);
1447
1448static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1449 struct edma *edma_cc, int cc_id)
1450{
1451 int i;
1452 u32 value, cccfg;
1453 s8 (*queue_priority_map)[2];
1454
1455
1456 cccfg = edma_read(cc_id, EDMA_CCCFG);
1457
1458 value = GET_NUM_REGN(cccfg);
1459 edma_cc->num_region = BIT(value);
1460
1461 value = GET_NUM_DMACH(cccfg);
1462 edma_cc->num_channels = BIT(value + 1);
1463
1464 value = GET_NUM_PAENTRY(cccfg);
1465 edma_cc->num_slots = BIT(value + 4);
1466
1467 value = GET_NUM_EVQUE(cccfg);
1468 edma_cc->num_tc = value + 1;
1469
1470 dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
1471 cccfg);
1472 dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
1473 dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
1474 dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
1475 dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
1476
1477
1478 if (pdata->queue_priority_mapping)
1479 return 0;
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491 queue_priority_map = devm_kzalloc(dev,
1492 (edma_cc->num_tc + 1) * sizeof(s8),
1493 GFP_KERNEL);
1494 if (!queue_priority_map)
1495 return -ENOMEM;
1496
1497 for (i = 0; i < edma_cc->num_tc; i++) {
1498 queue_priority_map[i][0] = i;
1499 queue_priority_map[i][1] = i;
1500 }
1501 queue_priority_map[i][0] = -1;
1502 queue_priority_map[i][1] = -1;
1503
1504 pdata->queue_priority_mapping = queue_priority_map;
1505
1506 pdata->default_queue = i - 1;
1507
1508 return 0;
1509}
1510
1511#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
1512
/*
 * Parse the "ti,edma-xbar-event-map" property (pairs of
 * {channel, xbar input}) and program the crossbar so those inputs
 * drive the given DMA channels.  The parsed, {-1,-1}-terminated pair
 * list is stored in pdata->xbar_chans.
 */
static int edma_xbar_event_map(struct device *dev, struct device_node *node,
			       struct edma_soc_info *pdata, size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	/* nelm s16 values plus one {-1,-1} terminator pair */
	xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mux */
	nelm >>= 1;
	xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		/* each 32-bit mux register holds four 8-bit fields */
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
1556
/*
 * Fill @pdata from the controller's device-tree node: allocate an
 * (empty) reservation table and, if present, apply the crossbar
 * event map.  Returns 0 or a negative errno.
 */
static int edma_of_parse_dt(struct device *dev,
			    struct device_node *node,
			    struct edma_soc_info *pdata)
{
	int ret = 0;
	struct property *prop;
	size_t sz;
	struct edma_rsv_info *rsv_info;

	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
	if (!rsv_info)
		return -ENOMEM;
	pdata->rsv = rsv_info;

	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
	if (prop)
		ret = edma_xbar_event_map(dev, node, pdata, sz);

	return ret;
}
1577
/*
 * Filter info handed to of_dma_simple_xlate() for DT channel translation;
 * the dma_cap mask is populated in edma_setup_info_from_dt().
 */
static struct of_dma_filter_info edma_filter_info = {
	.filter_fn = edma_filter_fn,
};
1581
1582static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1583 struct device_node *node)
1584{
1585 struct edma_soc_info *info;
1586 int ret;
1587
1588 info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
1589 if (!info)
1590 return ERR_PTR(-ENOMEM);
1591
1592 ret = edma_of_parse_dt(dev, node, info);
1593 if (ret)
1594 return ERR_PTR(ret);
1595
1596 dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
1597 dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
1598 of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
1599 &edma_filter_info);
1600
1601 return info;
1602}
1603#else
/* Stub when OF/DMADEVICES support is not built in: DT probing unsupported. */
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     struct device_node *node)
{
	return ERR_PTR(-ENOSYS);
}
1609#endif
1610
/*
 * Probe up to EDMA_MAX_CC channel controllers.  For each CC described by
 * platform data (or, for DT boot, the single instance allowed): map its
 * register space, read its capabilities from hardware, clear all PaRAM
 * slots, apply channel/slot reservations and crossbar routing, request the
 * completion and error interrupts, program event-queue priorities, reset
 * region access bits, and register an "edma-dma-engine" child device.
 * Returns 0 on success or a negative errno.
 */
static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info **info = pdev->dev.platform_data;
	struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL};
	s8 (*queue_priority_mapping)[2];
	int i, j, off, ln, found = 0;
	int status = -1;
	const s16 (*rsv_chans)[2];
	const s16 (*rsv_slots)[2];
	const s16 (*xbar_chans)[2];
	int irq[EDMA_MAX_CC] = {0, 0};
	int err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource *r[EDMA_MAX_CC] = {NULL};
	struct resource res[EDMA_MAX_CC];
	char res_name[10];
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	int ret;
	struct platform_device_info edma_dev_info = {
		.name = "edma-dma-engine",
		.dma_mask = DMA_BIT_MASK(32),
		.parent = &pdev->dev,
	};

	if (node) {
		/* DT boot: only a single EDMA instance is supported. */
		if (arch_num_cc) {
			dev_err(dev, "only one EDMA instance is supported via DT\n");
			return -ENODEV;
		}

		ninfo[0] = edma_setup_info_from_dt(dev, node);
		if (IS_ERR(ninfo[0])) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(ninfo[0]);
		}

		info = ninfo;
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/*
		 * NOTE(review): this error path returns without
		 * pm_runtime_put()/pm_runtime_disable() — looks like a
		 * runtime-PM imbalance; confirm against driver model docs.
		 */
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	for (j = 0; j < EDMA_MAX_CC; j++) {
		if (!info[j]) {
			/* No more CCs described; fail only if none found. */
			if (!found)
				return -ENODEV;
			break;
		}
		if (node) {
			/* DT: CC register space is reg entry j of the node. */
			ret = of_address_to_resource(node, j, &res[j]);
			if (!ret)
				r[j] = &res[j];
		} else {
			/* Platform data: look up the "edma_ccN" MEM resource. */
			sprintf(res_name, "edma_cc%d", j);
			r[j] = platform_get_resource_byname(pdev,
						IORESOURCE_MEM,
						res_name);
		}
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
		if (IS_ERR(edmacc_regs_base[j]))
			return PTR_ERR(edmacc_regs_base[j]);

		edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
					  GFP_KERNEL);
		if (!edma_cc[j])
			return -ENOMEM;

		/* Read channel/slot/TC/region counts from the CC registers. */
		ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
		if (ret)
			return ret;

		edma_cc[j]->default_queue = info[j]->default_queue;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		/* Reset every PaRAM slot to the harmless dummy parameter set. */
		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
				&dummy_paramset, PARM_SIZE);

		/* Start with all channels marked unused (bit set = unused). */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Channels reserved for use outside this driver. */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				/* Each entry is {first channel, count}. */
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* PaRAM slots reserved: mark them in-use up front. */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				/* Each entry is {first slot, count}. */
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		/* Channels routed via the crossbar belong to this driver. */
		xbar_chans = info[j]->xbar_chans;
		if (xbar_chans) {
			for (i = 0; xbar_chans[i][1] != -1; i++) {
				off = xbar_chans[i][1];
				clear_bits(off, 1,
					edma_cc[j]->edma_unused);
			}
		}

		if (node) {
			/* DT interrupt order: 0 = completion, 2 = CC error. */
			irq[j] = irq_of_parse_and_map(node, 0);
			err_irq[j] = irq_of_parse_and_map(node, 2);
		} else {
			char irq_name[10];

			sprintf(irq_name, "edma%d", j);
			irq[j] = platform_get_irq_byname(pdev, irq_name);

			sprintf(irq_name, "edma%d_err", j);
			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		}
		edma_cc[j]->irq_res_start = irq[j];
		edma_cc[j]->irq_res_end = err_irq[j];

		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
					  "edma", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				irq[j], status);
			return status;
		}

		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
					  "edma_error", dev);
		if (status < 0) {
			dev_dbg(&pdev->dev,
				"devm_request_irq %d failed --> %d\n",
				err_irq[j], status);
			return status;
		}

		/* Point every DMA channel at the default event queue. */
		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, info[j]->default_queue);

		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Program event-queue -> TC priorities ({-1,-1} terminated). */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						 queue_priority_mapping[i][0],
						 queue_priority_mapping[i][1]);

		/*
		 * Map channels to PaRAM entries only when the CC supports
		 * channel mapping (CHMAP_EXIST set in CCCFG).
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		/* Clear DMA/QDMA region access bits for every region. */
		for (i = 0; i < edma_cc[j]->num_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		edma_cc[j]->info = info[j];
		arch_num_cc++;

		/* Register the dmaengine wrapper device for this CC. */
		edma_dev_info.id = j;
		platform_device_register_full(&edma_dev_info);
	}

	return 0;
}
1812
1813#ifdef CONFIG_PM_SLEEP
/*
 * Late system-resume handler: CC register state is lost across suspend, so
 * for every registered controller re-program the event-queue priorities, the
 * channel->PaRAM mapping (when supported) and, for each in-use channel, its
 * shadow-region access bit and completion interrupt.  Always returns 0.
 */
static int edma_pm_resume(struct device *dev)
{
	int i, j;

	for (j = 0; j < arch_num_cc; j++) {
		struct edma *cc = edma_cc[j];

		s8 (*queue_priority_mapping)[2];

		queue_priority_mapping = cc->info->queue_priority_mapping;

		/* Restore event-queue -> TC priorities ({-1,-1} terminated). */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						 queue_priority_mapping[i][0],
						 queue_priority_mapping[i][1]);

		/*
		 * Map channels to PaRAM entries only when the CC supports
		 * channel mapping (CHMAP_EXIST set in CCCFG).
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		/*
		 * NOTE(review): edma_inuse tracks PaRAM slots; indexing it by
		 * channel number assumes slot i == channel i for the first
		 * num_channels slots — confirm against the allocator.
		 */
		for (i = 0; i < cc->num_channels; i++) {
			if (test_bit(i, cc->edma_inuse)) {
				/* Re-enable shadow-region 0 access. */
				edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
					       BIT(i & 0x1f));
				/* Re-arm the saved completion callback. */
				setup_dma_interrupt(i,
						    cc->intr_data[i].callback,
						    cc->intr_data[i].data);
			}
		}
	}

	return 0;
}
1853#endif
1854
/* Only a late-resume hook is needed; suspend requires no register work. */
static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
};
1858
/* Platform driver; bound once at init via platform_driver_probe(). */
static struct platform_driver edma_driver = {
	.driver = {
		.name = "edma",
		.pm = &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
	.probe = edma_probe,
};
1867
static int __init edma_init(void)
{
	/*
	 * platform_driver_probe(): bind immediately to already-registered
	 * devices only — EDMA hardware is not hot-pluggable.
	 */
	return platform_driver_probe(&edma_driver, edma_probe);
}
/* arch_initcall: DMA must be up before subsystems that request channels. */
arch_initcall(edma_init);
1873
1874