// SPDX-License-Identifier: GPL-2.0
/*
 * BCM2835 DMA engine support
 *
 * Author: Florian Meier <florian.meier@koalo.de>
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8

/**
 * struct bcm2835_dmadev - BCM2835 DMA controller
 * @ddev: DMA device
 * @base: base address of register map
 * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
 * @zero_page: bus address of zero page (to detect transactions copying from
 *	zero page and avoid accessing memory if so)
 */
struct bcm2835_dmadev {
	struct dma_device ddev;
	void __iomem *base;
	struct device_dma_parameters dma_parms;
	dma_addr_t zero_page;
};

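/*
 * Hardware DMA control block: the engine reads these eight 32-bit words
 * directly from bus memory. info/src/dst/length/stride/next mirror the
 * per-channel TI, SOURCE_AD, DEST_AD, TXFR_LEN, STRIDE and NEXTCONBK
 * registers; the two pad words are reserved. Blocks are chained through
 * @next to form one transfer.
 */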
struct bcm2835_dma_cb {
	uint32_t info;
	uint32_t src;
	uint32_t dst;
	uint32_t length;
	uint32_t stride;
	uint32_t next;
	uint32_t pad[2];
};

struct bcm2835_cb_entry {
	struct bcm2835_dma_cb *cb;
	dma_addr_t paddr;
};

struct bcm2835_chan {
	struct virt_dma_chan vc;

	struct dma_slave_config cfg;
	unsigned int dreq;

	int ch;
	struct bcm2835_desc *desc;
	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
	unsigned int irq_flags;

	bool is_lite_channel;
};

struct bcm2835_desc {
	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

	unsigned int frames;
	size_t size;

	bool cyclic;

	struct bcm2835_cb_entry cb_list[];
};

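/* Per-channel register offsets, relative to BCM2835_DMA_CHANIO(base, n) */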
#define BCM2835_DMA_CS		0x00
#define BCM2835_DMA_ADDR	0x04
#define BCM2835_DMA_TI		0x08
#define BCM2835_DMA_SOURCE_AD	0x0c
#define BCM2835_DMA_DEST_AD	0x10
#define BCM2835_DMA_LEN		0x14
#define BCM2835_DMA_STRIDE	0x18
#define BCM2835_DMA_NEXTCB	0x1c
#define BCM2835_DMA_DEBUG	0x20

/* DMA CS Control and Status bits */
#define BCM2835_DMA_ACTIVE	BIT(0)  /* activate the DMA */
#define BCM2835_DMA_END		BIT(1)  /* current CB has ended */
#define BCM2835_DMA_INT		BIT(2)  /* interrupt status */
#define BCM2835_DMA_DREQ	BIT(3)  /* DREQ state */
#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last
					       * AXI-write to ack
					       */
#define BCM2835_DMA_ERR		BIT(8)
#define BCM2835_DMA_PRIORITY(x) (((x) & 15) << 16) /* AXI priority */
#define BCM2835_DMA_PANIC_PRIORITY(x) (((x) & 15) << 20) /* panic priority */
/* current value of TI.BCM2835_DMA_WAIT_RESP */
#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
#define BCM2835_DMA_DIS_DEBUG	BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */

/* Transfer information bits - also the bcm2835_dma_cb.info field */
#define BCM2835_DMA_INT_EN	BIT(0)
#define BCM2835_DMA_TDMODE	BIT(1) /* 2D-Mode */
#define BCM2835_DMA_WAIT_RESP	BIT(3) /* wait for AXI write to be acked */
#define BCM2835_DMA_D_INC	BIT(4)
#define BCM2835_DMA_D_WIDTH	BIT(5) /* 128-bit writes if set */
#define BCM2835_DMA_D_DREQ	BIT(6) /* enable DREQ for destination */
#define BCM2835_DMA_D_IGNORE	BIT(7) /* ignore destination writes */
#define BCM2835_DMA_S_INC	BIT(8)
#define BCM2835_DMA_S_WIDTH	BIT(9) /* 128-bit reads if set */
#define BCM2835_DMA_S_DREQ	BIT(10) /* enable DREQ for source */
#define BCM2835_DMA_S_IGNORE	BIT(11) /* ignore source reads - read 0 */
#define BCM2835_DMA_BURST_LENGTH(x) (((x) & 15) << 12)
#define BCM2835_DMA_PER_MAP(x)	(((x) & 31) << 16) /* REQ source */
#define BCM2835_DMA_WAIT(x)	(((x) & 31) << 21) /* add DMA-wait cycles */
#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */

/* debug register bits */
#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR	BIT(0)
#define BCM2835_DMA_DEBUG_FIFO_ERR		BIT(1)
#define BCM2835_DMA_DEBUG_READ_ERR		BIT(2)
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
#define BCM2835_DMA_DEBUG_ID_SHIFT 16
#define BCM2835_DMA_DEBUG_ID_BITS 9
#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
#define BCM2835_DMA_DEBUG_STATE_BITS 9
#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
#define BCM2835_DMA_DEBUG_VERSION_BITS 3
#define BCM2835_DMA_DEBUG_LITE BIT(28)

/* shared registers for all dma channels */
#define BCM2835_DMA_INT_STATUS	0xfe0
#define BCM2835_DMA_ENABLE	0xff0

#define BCM2835_DMA_DATA_TYPE_S8	1
#define BCM2835_DMA_DATA_TYPE_S16	2
#define BCM2835_DMA_DATA_TYPE_S32	4
#define BCM2835_DMA_DATA_TYPE_S128	16

/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))

/* the max dma length for different channels */
#define MAX_DMA_LEN SZ_1G
#define MAX_LITE_DMA_LEN (SZ_64K - 4)

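/*
 * Lite channels are detected via BCM2835_DMA_DEBUG_LITE and have a 16-bit
 * transfer-length register, so they can move less than 64 KiB per control
 * block; the cap of SZ_64K - 4 presumably keeps each frame a multiple of
 * 4 bytes. Normal channels take a full 30-bit length (up to 1 GiB).
 */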
static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
{
	/* lite and normal channels have different max frame length */
	return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
}

/* how many frames of max_len size do we need to transfer len bytes */
static inline size_t bcm2835_dma_frames_for_length(size_t len,
						   size_t max_len)
{
	return DIV_ROUND_UP(len, max_len);
}

static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
	return container_of(d, struct bcm2835_dmadev, ddev);
}

static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct bcm2835_chan, vc.chan);
}

static inline struct bcm2835_desc *to_bcm2835_dma_desc(
		struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct bcm2835_desc, vd.tx);
}

static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
	size_t i;

	for (i = 0; i < desc->frames; i++)
		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
			      desc->cb_list[i].paddr);

	kfree(desc);
}

static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
	bcm2835_dma_free_cb_chain(
		container_of(vd, struct bcm2835_desc, vd));
}

static void bcm2835_dma_create_cb_set_length(
	struct bcm2835_chan *chan,
	struct bcm2835_dma_cb *control_block,
	size_t len,
	size_t period_len,
	size_t *total_len,
	u32 finalextrainfo)
{
	size_t max_len = bcm2835_dma_max_frame_length(chan);

	/* set the length taking lite-channel limitations into account */
	control_block->length = min_t(u32, len, max_len);

	/* finished if we have no period_length */
	if (!period_len)
		return;

	/*
	 * period_len means: we need to generate transfers that terminate
	 * at every multiple of period_len - this is typically used to set
	 * the interrupt flag in info, which is required during cyclic
	 * transfers.
	 */

	/* have we filled in period_length yet? */
	if (*total_len + control_block->length < period_len) {
		/* update number of bytes in this period so far */
		*total_len += control_block->length;
		return;
	}

	/* calculate the length that remains to reach period_len */
	control_block->length = period_len - *total_len;

	/* reset total_len for the next period */
	*total_len = 0;

	/* add extrainfo bits to info */
	control_block->info |= finalextrainfo;
}
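
/*
 * Illustrative example: for a cyclic transfer with buf_len == 3 * period_len
 * (period_len within the channel's max frame length), the chain builder
 * emits three control blocks, each trimmed above to end exactly on a period
 * boundary and tagged with finalextrainfo (typically BCM2835_DMA_INT_EN),
 * so the hardware raises one interrupt per completed period.
 */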

static inline size_t bcm2835_dma_count_frames_for_sg(
	struct bcm2835_chan *c,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	size_t frames = 0;
	struct scatterlist *sgent;
	unsigned int i;
	size_t plength = bcm2835_dma_max_frame_length(c);

	for_each_sg(sgl, sgent, sg_len, i)
		frames += bcm2835_dma_frames_for_length(
			sg_dma_len(sgent), plength);

	return frames;
}

/**
 * bcm2835_dma_create_cb_chain - create a control block chain and fill it in
 *
 * @chan:           the @dma_chan for which we run this
 * @direction:      the direction in which we transfer
 * @cyclic:         it is a cyclic transfer
 * @info:           the default info bits to apply per controlblock
 * @finalextrainfo: additional bits in the last controlblock
 *                  (or when period_len is reached in case of cyclic)
 * @frames:         number of controlblocks to allocate
 * @src:            the src address to assign (if the S_INC bit is set
 *                  it will be incremented)
 * @dst:            the dst address to assign (if the D_INC bit is set
 *                  it will be incremented)
 * @buf_len:        the full buffer length (may also be 0)
 * @period_len:     the period length at which to apply @finalextrainfo,
 *                  in addition to the last transfer;
 *                  this will also break some control-blocks early
 * @gfp:            the GFP flag to use for allocation
 */
static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
	struct dma_chan *chan, enum dma_transfer_direction direction,
	bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
	dma_addr_t src, dma_addr_t dst, size_t buf_len,
	size_t period_len, gfp_t gfp)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len = buf_len, total_len;
	size_t frame;
	struct bcm2835_desc *d;
	struct bcm2835_cb_entry *cb_entry;
	struct bcm2835_dma_cb *control_block;

	if (!frames)
		return NULL;

	/* allocate and set up the descriptor */
	d = kzalloc(struct_size(d, cb_list, frames), gfp);
	if (!d)
		return NULL;

	d->c = c;
	d->dir = direction;
	d->cyclic = cyclic;

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
		cb_entry = &d->cb_list[frame];
		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
					      &cb_entry->paddr);
		if (!cb_entry->cb)
			goto error_cb;

		/* fill in the control block */
		control_block = cb_entry->cb;
		control_block->info = info;
		control_block->src = src;
		control_block->dst = dst;
		control_block->stride = 0;
		control_block->next = 0;

		if (buf_len) {
			/* set up length in control_block if requested */
			bcm2835_dma_create_cb_set_length(
				c, control_block,
				len, period_len, &total_len,
				cyclic ? finalextrainfo : 0);

			/* calculate new remaining length */
			len -= control_block->length;
		}

		/* link this to the last controlblock */
		if (frame)
			d->cb_list[frame - 1].cb->next = cb_entry->paddr;

		/* update src, dst and length */
		if (src && (info & BCM2835_DMA_S_INC))
			src += control_block->length;
		if (dst && (info & BCM2835_DMA_D_INC))
			dst += control_block->length;

		/* length of total transfer */
		d->size += control_block->length;
	}

	/* the last frame requires extra flags */
	d->cb_list[d->frames - 1].cb->info |= finalextrainfo;

	/* detect a size mismatch */
	if (buf_len && (d->size != buf_len))
		goto error_cb;

	return d;
error_cb:
	bcm2835_dma_free_cb_chain(d);

	return NULL;
}
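
/*
 * Note: on the error path above, bcm2835_dma_free_cb_chain() walks only
 * d->frames entries, and d->frames is incremented once per fully allocated
 * control block, so a partially built chain is torn down safely.
 */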

static void bcm2835_dma_fill_cb_chain_with_sg(
	struct dma_chan *chan,
	enum dma_transfer_direction direction,
	struct bcm2835_cb_entry *cb,
	struct scatterlist *sgl,
	unsigned int sg_len)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	size_t len, max_len;
	unsigned int i;
	dma_addr_t addr;
	struct scatterlist *sgent;

	max_len = bcm2835_dma_max_frame_length(c);
	for_each_sg(sgl, sgent, sg_len, i) {
		for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
		     len > 0;
		     addr += cb->cb->length, len -= cb->cb->length, cb++) {
			if (direction == DMA_DEV_TO_MEM)
				cb->cb->dst = addr;
			else
				cb->cb->src = addr;
			cb->cb->length = min(len, max_len);
		}
	}
}

static void bcm2835_dma_abort(struct bcm2835_chan *c)
{
	void __iomem *chan_base = c->chan_base;
	long int timeout = 10000;

	/*
	 * A zero control block address means the channel is idle.
	 * (The ACTIVE flag in the CS register is not a reliable indicator.)
	 */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* Write 0 to the active bit - pause the DMA */
	writel(0, chan_base + BCM2835_DMA_CS);

	/* wait for outstanding transactions to complete */
	while ((readl(chan_base + BCM2835_DMA_CS) &
		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();

	/* peripheral might be stuck and fail to signal completion */
	if (!timeout)
		dev_err(c->vc.chan.device->dev,
			"failed to complete outstanding writes\n");

	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}

static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct bcm2835_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}

static irqreturn_t bcm2835_dma_callback(int irq, void *data)
{
	struct bcm2835_chan *c = data;
	struct bcm2835_desc *d;
	unsigned long flags;

	/* check the shared interrupt */
	if (c->irq_flags & IRQF_SHARED) {
		/* check if the interrupt is enabled */
		flags = readl(c->chan_base + BCM2835_DMA_CS);
		/* if not set then we are not the reason for the irq */
		if (!(flags & BCM2835_DMA_INT))
			return IRQ_NONE;
	}

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Clear the INT flag to receive further interrupts. Keep the channel
	 * active in case the descriptor is cyclic or in case the client has
	 * already terminated the descriptor and issued a new one. (May happen
	 * if this IRQ handler is threaded.) If the channel is finished, it
	 * will remain idle despite the ACTIVE flag being set.
	 */
	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
	       c->chan_base + BCM2835_DMA_CS);

	d = c->desc;

	if (d) {
		if (d->cyclic) {
			/* call the cyclic callback */
			vchan_cyclic_callback(&d->vd);
		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
			vchan_cookie_complete(&c->desc->vd);
			bcm2835_dma_start_desc(c);
		}
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct device *dev = c->vc.chan.device->dev;

	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

	/*
	 * Control blocks are 256 bit in length and must start at a 256 bit
	 * boundary (BCM2835 ARM Peripherals, sec. 4.2.1.1).
	 */
	c->cb_pool = dma_pool_create(dev_name(dev), dev,
				     sizeof(struct bcm2835_dma_cb), 32, 0);
	if (!c->cb_pool) {
		dev_err(dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return request_irq(c->irq_number, bcm2835_dma_callback,
			   c->irq_flags, "DMA IRQ", c);
}

static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}

static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
{
	return d->size;
}

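/*
 * Bytes still to be transferred from @addr (the channel's current bus
 * address) to the end of the descriptor: once the control block containing
 * @addr is found, the remainder of that block plus all later blocks counts
 * toward the residue.
 */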
static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
{
	unsigned int i;
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
		size_t this_size = control_block->length;
		dma_addr_t dma;

		if (d->dir == DMA_DEV_TO_MEM)
			dma = control_block->dst;
		else
			dma = control_block->src;

		if (size)
			size += this_size;
		else if (addr >= dma && addr < dma + this_size)
			size += dma + this_size - addr;
	}

	return size;
}

static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue =
			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct bcm2835_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
		else
			pos = 0;

		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void bcm2835_dma_issue_pending(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		bcm2835_dma_start_desc(c);

	spin_unlock_irqrestore(&c->vc.lock, flags);
}
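
/*
 * Typical client usage (illustrative, standard dmaengine flow):
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * bcm2835_dma_issue_pending() starts the first queued descriptor if the
 * channel is idle; completion is then signalled from the IRQ handler.
 */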

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
	u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	/* if src, dst or len is not given return with an error */
	if (!src || !dst || !len)
		return NULL;

	/* calculate number of frames */
	frames = bcm2835_dma_frames_for_length(len, max_len);

	/* allocate the CB chain - this also fills in the pointers */
	d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
					info, extra, frames,
					src, dst, len, 0, GFP_KERNEL);
	if (!d)
		return NULL;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
	struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src = 0, dst = 0;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = BCM2835_DMA_INT_EN;
	size_t frames;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev,
			"%s: bad direction?\n", __func__);
		return NULL;
	}

	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
	}

	/* count frames in sg list */
	frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);

	/* allocate the CB chain */
	d = bcm2835_dma_create_cb_chain(chan, direction, false,
					info, extra,
					frames, src, dst, 0, 0,
					GFP_NOWAIT);
	if (!d)
		return NULL;

	/* fill in frames with scatterlist pointers */
	bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
					  sgl, sg_len);

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	struct bcm2835_desc *d;
	dma_addr_t src, dst;
	u32 info = BCM2835_DMA_WAIT_RESP;
	u32 extra = 0;
	size_t max_len = bcm2835_dma_max_frame_length(c);
	size_t frames;

	if (!is_slave_direction(direction)) {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (!buf_len) {
		dev_err(chan->device->dev,
			"%s: bad buffer length (= 0)\n", __func__);
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		extra |= BCM2835_DMA_INT_EN;
	else
		period_len = buf_len;

	/*
	 * warn if buf_len is not a multiple of period_len - this may lead
	 * to unexpected latencies for interrupts and thus audible clicks
	 */
	if (buf_len % period_len)
		dev_warn_once(chan->device->dev,
			      "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
			      __func__, buf_len, period_len);

	/* Setup DREQ channel */
	if (c->dreq != 0)
		info |= BCM2835_DMA_PER_MAP(c->dreq);

	if (direction == DMA_DEV_TO_MEM) {
		if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		src = c->cfg.src_addr;
		dst = buf_addr;
		info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
	} else {
		if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
			return NULL;
		dst = c->cfg.dst_addr;
		src = buf_addr;
		info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;

		/* non-lite channels can write zeroes w/o accessing memory */
		if (buf_addr == od->zero_page && !c->is_lite_channel)
			info |= BCM2835_DMA_S_IGNORE;
	}

	/* calculate number of frames */
	frames = /* number of periods */
		 DIV_ROUND_UP(buf_len, period_len) *
		 /* number of frames per period */
		 bcm2835_dma_frames_for_length(period_len, max_len);

	/*
	 * allocate the CB chain
	 * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
	 * implementation calls prep_dma_cyclic with interrupts disabled.
	 */
	d = bcm2835_dma_create_cb_chain(chan, direction, true,
					info, extra,
					frames, src, dst, buf_len,
					period_len, GFP_NOWAIT);
	if (!d)
		return NULL;

	/* wrap around into a loop */
	d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int bcm2835_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	c->cfg = *cfg;

	return 0;
}

static int bcm2835_dma_terminate_all(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* stop DMA activity */
	if (c->desc) {
		if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
			vchan_terminate_vdesc(&c->desc->vd);
		else
			vchan_vdesc_fini(&c->desc->vd);
		c->desc = NULL;
		bcm2835_dma_abort(c);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void bcm2835_dma_synchronize(struct dma_chan *chan)
{
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
				 int irq, unsigned int irq_flags)
{
	struct bcm2835_chan *c;

	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->vc.desc_free = bcm2835_dma_desc_free;
	vchan_init(&c->vc, &d->ddev);

	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
	c->ch = chan_id;
	c->irq_number = irq;
	c->irq_flags = irq_flags;

	/* check in DEBUG register if this is a LITE channel */
	if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
		BCM2835_DMA_DEBUG_LITE)
		c->is_lite_channel = true;

	return 0;
}

static void bcm2835_dma_free(struct bcm2835_dmadev *od)
{
	struct bcm2835_chan *c, *next;

	list_for_each_entry_safe(c, next, &od->ddev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
			     DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

static const struct of_device_id bcm2835_dma_of_match[] = {
	{ .compatible = "brcm,bcm2835-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);

static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
					  struct of_dma *ofdma)
{
	struct bcm2835_dmadev *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->ddev);
	if (!chan)
		return NULL;

	/* Set DREQ from param */
	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];

	return chan;
}
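
/*
 * Clients reference a channel in the device tree with a single cell that
 * selects the peripheral DREQ line, e.g. (illustrative):
 *
 *	dmas = <&dma 2>, <&dma 3>;
 *	dma-names = "tx", "rx";
 *
 * bcm2835_dma_xlate() stores that cell in the channel's c->dreq, which the
 * prep callbacks fold into TI via BCM2835_DMA_PER_MAP().
 */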

static int bcm2835_dma_probe(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od;
	struct resource *res;
	void __iomem *base;
	int rc;
	int i, j;
	int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
	int irq_flags;
	uint32_t chans_available;
	char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "Unable to set DMA mask\n");
		return rc;
	}

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	pdev->dev.dma_parms = &od->dma_parms;
	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	od->base = base;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
	od->ddev.device_tx_status = bcm2835_dma_tx_status;
	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
	od->ddev.device_config = bcm2835_dma_slave_config;
	od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
	od->ddev.device_synchronize = bcm2835_dma_synchronize;
	od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			      BIT(DMA_MEM_TO_MEM);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.descriptor_reuse = true;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);

	platform_set_drvdata(pdev, od);

	od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
					   PAGE_SIZE, DMA_TO_DEVICE,
					   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
		dev_err(&pdev->dev, "Failed to map zero page\n");
		return -ENOMEM;
	}

	/* Request DMA channel mask from device tree */
	if (of_property_read_u32(pdev->dev.of_node,
				 "brcm,dma-channel-mask",
				 &chans_available)) {
		dev_err(&pdev->dev, "Failed to get channel mask\n");
		rc = -EINVAL;
		goto err_no_dma;
	}

	/* get irqs for each channel that we support */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip masked out channels */
		if (!(chans_available & (1 << i))) {
			irq[i] = -1;
			continue;
		}

		/* get the named irq */
		snprintf(chan_name, sizeof(chan_name), "dma%i", i);
		irq[i] = platform_get_irq_byname(pdev, chan_name);
		if (irq[i] >= 0)
			continue;

		/* legacy device tree case handling */
		dev_warn_once(&pdev->dev,
			      "missing interrupt-names property in device tree - legacy interpretation is used\n");

		/*
		 * in the legacy numbering, channels >= 11 share
		 * interrupt index 11
		 */
		irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
	}

	/* initialize the channels that have an irq */
	for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
		/* skip channels without irq */
		if (irq[i] < 0)
			continue;

		/* check if there are other channels that also use this irq */
		irq_flags = 0;
		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
			if ((i != j) && (irq[j] == irq[i])) {
				irq_flags = IRQF_SHARED;
				break;
			}

		/* initialize the channel */
		rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
		if (rc)
			goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(pdev->dev.of_node,
					bcm2835_dma_xlate, od);
	if (rc) {
		dev_err(&pdev->dev, "Failed to register DMA controller\n");
		goto err_no_dma;
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to register slave DMA engine device: %d\n", rc);
		goto err_no_dma;
	}

	dev_dbg(&pdev->dev, "Loaded BCM2835 DMA engine driver\n");

	return 0;

err_no_dma:
	bcm2835_dma_free(od);
	return rc;
}

static int bcm2835_dma_remove(struct platform_device *pdev)
{
	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	bcm2835_dma_free(od);

	return 0;
}

static struct platform_driver bcm2835_dma_driver = {
	.probe	= bcm2835_dma_probe,
	.remove	= bcm2835_dma_remove,
	.driver = {
		.name = "bcm2835-dma",
		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
	},
};

module_platform_driver(bcm2835_dma_driver);

MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");