/*
 * Driver for the ARM PrimeCell(tm) PL080/PL081 AMBA DMA controllers.
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this vendor's variant
 * @dualmaster: whether this variant supports dual AHB masters or not
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};

/*
 * An LLI struct - see the PL08x TRM.  Note that @lli carries the AHB
 * master select in bit 0; the bus selection for @src and @dst lives in
 * @cctl.  All four fields are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 *	fetches
 * @mem_buses: set to indicate memory transfers on AHB2
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for the PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to create
 * transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
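
/*
 * Worked example of the sizing above: struct pl08x_lli is four u32s,
 * i.e. 16 bytes, so one 0x2000-byte LLI buffer gives
 * 0x2000 / 16 = 512 LLIs per transfer descriptor.
 */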

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * already been set when the LLIs were constructed.  Poke them into the
 * hardware and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for the channel to go inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * Stops the channel and clears any pending interrupt status.  This
 * should not be used for an on-going transfer, but as a method of
 * shutting down a channel (eg, when it's no longer used) or
 * terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Mask before shifting, or higher control bits corrupt the value */
	switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
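
/*
 * Example of the decoding above: a cctl with a source width of
 * PL080_WIDTH_16BIT and a transfer size field of 0x100 describes
 * 0x100 halfwords, i.e. 512 bytes - the transfer size counts
 * source-width units, not bytes.
 */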

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If
 * all are taken, return NULL and the requester will have to cope by
 * using some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
	size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_driver_data *pl08x;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
};

/*
 * Autoselect a master bus to use for the transfer.  This prefers the
 * destination bus if both are available; if one side has a fixed
 * (non-incrementing) address, that side becomes the master.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* Both widths are 1 byte: default to the destination */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the
 * bus addresses and remainder counter.
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	if (bd->pl08x->lli_buses & PL08X_AHB2)
		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

/*
 * Return the number of bytes that can be transferred from @addr before
 * the next 1KiB boundary is hit, capped at @len.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
		(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
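
/*
 * Example: with addr = 0x3FC and len = 16, the next 1KiB boundary is
 * at 0x400, so boundary_len = 0x400 - 0x3FC = 4 and the function
 * returns min(4, 16) = 4; the remaining 12 bytes must go into a
 * following LLI.
 */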

/*
 * This fills in the table of LLIs for the transfer descriptor.
 * Note that we assume we never have to change the burst sizes.
 * Returns the number of LLIs filled in, or 0 on error.
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
		&txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.pl08x = pl08x;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		"%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		__func__, bd.srcbus.buswidth, bd.dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		"%s max bytes per lli = %zu\n",
		__func__, max_bytes_per_lli);

	/* We need to count this down to zero */
	bd.remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		"%s remainder = %zu\n",
		__func__, bd.remainder);

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus the other will be chosen
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	if (txd->len < mbus->buswidth) {
		/* Less than a bus width available - send as single bytes */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				"%s single byte LLIs for a transfer of "
				"less than a bus width (remain 0x%08zx)\n",
				__func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/* Make one byte LLIs until master bus is aligned */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				"%s adjustment lli for less than bus width "
				"(remain 0x%08zx)\n",
				__func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
						target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
						target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len = min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len = (lli_len/mbus->buswidth) *
					mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary.  Any odd
				 * bytes that do not fill a whole master bus
				 * width are sent byte by byte afterwards, so
				 * that the next LLI starts aligned again.
				 */
				odd_bytes = lli_len % mbus->buswidth;
				lli_len -= odd_bytes;

			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * calculate the actual transfer size in
				 * relation to bus width, leaving a maximum
				 * remainder of the smallest bus width - 1
				 */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len = tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
					lli_len, cctl);
				total_bytes += lli_len;
			}

			/* Send any odd bytes */
			if (odd_bytes) {
				/*
				 * Creep past the boundary, maintaining
				 * master alignment
				 */
				int j;
				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
						num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %zu)\n",
				__func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded LLIs don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
				 i,
				 &llis_va[i],
				 llis_va[i].src,
				 llis_va[i].dst,
				 llis_va[i].cctl,
				 llis_va[i].lli
				);
		}
	}
#endif

	return num_llis;
}

static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
	struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
			next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * Acquire a physical channel (and, for slave transfers, a mux signal)
 * for this virtual channel, unless it already has one.
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

	plchan->phychan_hold++;
	plchan->phychan = ch;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
	struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Check a transaction's status: anything submitted after the last
 * completed cookie is reported as in progress (or paused).
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
		bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};
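
/*
 * Note that burst_sizes[] above must stay sorted by descending
 * burstwords: dma_set_runtime_config() below walks it front to back
 * and picks the first (i.e. largest) burst size that does not exceed
 * the client's maxburst.
 */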

static int dma_set_runtime_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	dma_addr_t addr;
	u32 maxburst;
	u32 cctl = 0;
	int i;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		addr = config->dst_addr;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		addr = config->src_addr;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}

	plchan->runtime_addr = addr;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}
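
/*
 * A minimal sketch of how a client might exercise the function above;
 * "fifo_phys" is an illustrative peripheral FIFO address, not anything
 * defined in this driver.  DMA_SLAVE_CONFIG is routed here by
 * pl08x_control() below:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &cfg);
 */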

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
			struct pl08x_txd,
			node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here. The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending(). If we're idle, move to PAUSED until
		 * issue_pending() actually starts the transfer.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

/*
 * Given the source and destination AHB bus masks, select which master
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
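
/*
 * For example, with memory visible on both masters (src = AHB1 | AHB2)
 * and a peripheral wired to AHB2 only (dst = AHB2), the logic above
 * sets PL080_CONTROL_DST_AHB2 and leaves the source on AHB1, so the
 * two ends of the transfer use separate ports.
 */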

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x,
					pl08x->mem_buses, pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_data_direction direction,
	unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	u8 src_buses, dst_buses;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	txd->cctl = plchan->cd->cctl &
			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
			  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	txd->cctl |= PL080_CONTROL_PROT_SYS;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_SRC_INCR;
		txd->src_addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dst_addr = plchan->runtime_addr;
		else
			txd->dst_addr = plchan->cd->addr;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_DST_INCR;
		if (plchan->runtime_addr)
			txd->src_addr = plchan->runtime_addr;
		else
			txd->src_addr = plchan->cd->addr;
		txd->dst_addr = sgl->dma_address;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
			(struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Match the requested channel by name */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
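
/*
 * A minimal sketch of requesting a channel by name with the filter
 * above; "ssp0_tx" stands in for whatever bus_id the platform data
 * defines for the slave channel:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "ssp0_tx");
 */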

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off in that case.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		plchan->lc = txd->tx.cookie;
	}

	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
			struct pl08x_txd,
			node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
			chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
					waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0;
	u32 val;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/* An error interrupt (on one or more channels) */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Defensive: a spurious TC interrupt on an unclaimed
			 * channel would otherwise dereference a NULL plchan */
			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s TC interrupt on unused channel %d\n",
					__func__, i);
				continue;
			}

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/* Clear only the terminal interrupts on channels we processed */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
	struct dma_device *dmadev,
	unsigned int channels,
	bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as there are physical
	 * channels; we won't always be able to use all but the code
	 * will have to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = pl08x->pd->slave_channels[i].bus_id;
			chan->cd = &pl08x->pd->slave_channels[i];
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
		next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view channel state */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;	/* Don't fall through with ret == 0 */
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any lingering interrupts, then attach the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;	/* Don't fall through with ret == 0 */
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* The PL080 has 8 channels and dual AHB masters */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

/* The PL081 has only two channels and a single master */
static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280880,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);