/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there's 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS					\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS						\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

/*
 * This set of SUN4I_DDMA timing parameters were found experimentally while
 * working with the SPI driver and seem to make it behave correctly
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS					\
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |				\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))

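/*
 * A pchan represents one physical engine channel, either normal or
 * dedicated. vchans are bound to a free pchan for the duration of a
 * transfer and release it when they are done.
 */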
struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

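/*
 * A vchan is the client-visible channel. It stores the slave
 * configuration and the endpoint (DRQ) it talks to, and points to the
 * pchan and promise it is currently executing, if any.
 */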
struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

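/*
 * A promise holds the pre-computed register values (configuration,
 * addresses, length and timing parameters) for a single hardware
 * transfer. Promises are queued on a contract's demands list.
 */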
struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	int				is_cyclic;
};

struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	spinlock_t			lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

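/*
 * Convert a DMAengine maxburst count into the two-bit encoding used by
 * the SUN4I_DMA_CFG_*_BURST_LENGTH fields; anything above 8 is rejected.
 */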
static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

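/*
 * Convert a DMAengine bus width into the two-bit encoding used by the
 * SUN4I_DMA_CFG_*_DATA_WIDTH fields; widths above 4 bytes are rejected.
 */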
static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type;
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and perform a single pending transaction on it.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* Migrate the contract to the completed list */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it happen */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

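/*
 * Fill in the gaps in a slave config: if only the device side of the
 * transfer is fully specified, mirror its width and burst onto the
 * memory side. Configs missing the device side are rejected.
 */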
static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is called a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

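/*
 * memcpy is implemented as a contract with a single promise covering
 * the whole buffer, moved between two SDRAM (linear) endpoints.
 */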
static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

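/*
 * Cyclic transfers (used for audio) are built as a contract whose
 * promises are replayed in a loop from the interrupt handler; see
 * get_next_cyclic_promise() and the half-done trick explained below.
 */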
static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	if (vchan->is_dedicated) {
		/*
		 * As we are using this just for audio data, we need to use
		 * normal DMA. There is nothing stopping us from supporting
		 * dedicated DMA here as well, but nothing uses cyclic mode
		 * on it so far.
		 */
		dev_err(chan2dev(chan),
			"Cyclic transfers are only supported on Normal DMA\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	/* Figure out the endpoints and the address we need */
	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/*
	 * We will be using half done interrupts to make two periods
	 * out of a promise, so we need to program the DMA engine less
	 * often
	 */

	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to program the engine half as often as we
	 * otherwise would (keep in mind the hardware doesn't support
	 * linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *  |---|---|---|---| (periods / promises)
	 *  P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *  |-------|-------| (promises as configured on hw)
	 *  |---|---|---|---| (periods)
	 *  P   I   I,P I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 */
	nr_periods = DIV_ROUND_UP(len / period_len, 2);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len * 2;
		plength = min((len - offset), (period_len * 2));
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		promise = generate_ndma_promise(chan, src, dest,
						plength, sconfig, dir);
		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

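/*
 * Scatter-gather transfers are modelled as one contract with one
 * promise per scatterlist entry; the engine is reprogrammed from the
 * interrupt handler after each completed promise.
 */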
static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * There is no DMAEngine interface to configure timings, and
		 * so far they seem to work for everything we support, so they
		 * are kept here. Other devices may need different timings,
		 * but all we have is the "para" register and no documentation
		 * for it.
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

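/*
 * Terminating a channel halts the pchan by clearing its configuration
 * register, releases it and frees every queued descriptor.
 */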
static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_dma_desc_free_list(&vchan->vc, &head);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

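/*
 * The slave config is only stored here; it is sanitized and applied
 * when the next transfer is prepared, see sanitize_config().
 */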
static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}

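/*
 * Translate a two-cell DT phandle into a channel. An illustrative
 * consumer entry (values hypothetical) would look like:
 *
 *	dmas = <&dma 1 27>;
 *
 * where the first cell selects normal (0) or dedicated (1) DMA and the
 * second is the endpoint (DRQ) number.
 */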
static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The first promise on the demands list is the one currently in
	 * flight, so don't count its full length; ask the hardware how
	 * many bytes it still has to transfer instead.
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

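/*
 * Interrupt handler shared by all channels. Even bits in the pending
 * register are half-done interrupts, odd bits are end-of-transfer; the
 * handler completes or recycles promises accordingly and then tries to
 * schedule more work on any pchan it freed.
 */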
static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* pchan might have been released already */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_del(&vchan->processing->list);
			list_add_tail(&vchan->processing->list,
				      &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}

	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid a too long a loop
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	struct resource *res;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return priv->irq;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status		= sun4i_dma_tx_status;
	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config		= sun4i_dma_config;
	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
	priv->slave.copy_align			= 2;
	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity	= DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove	= sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");