1
2
3
4
5
6
7
8
9#include <linux/sched.h>
10#include <linux/device.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/sa11x0-dma.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20
21#include "virt-dma.h"
22
23#define NR_PHY_CHAN 6
24#define DMA_ALIGN 3
25#define DMA_MAX_SIZE 0x1fff
26#define DMA_CHUNK_SIZE 0x1000
27
28#define DMA_DDAR 0x00
29#define DMA_DCSR_S 0x04
30#define DMA_DCSR_C 0x08
31#define DMA_DCSR_R 0x0c
32#define DMA_DBSA 0x10
33#define DMA_DBTA 0x14
34#define DMA_DBSB 0x18
35#define DMA_DBTB 0x1c
36#define DMA_SIZE 0x20
37
38#define DCSR_RUN (1 << 0)
39#define DCSR_IE (1 << 1)
40#define DCSR_ERROR (1 << 2)
41#define DCSR_DONEA (1 << 3)
42#define DCSR_STRTA (1 << 4)
43#define DCSR_DONEB (1 << 5)
44#define DCSR_STRTB (1 << 6)
45#define DCSR_BIU (1 << 7)
46
47#define DDAR_RW (1 << 0)
48#define DDAR_E (1 << 1)
49#define DDAR_BS (1 << 2)
50#define DDAR_DW (1 << 3)
51#define DDAR_Ser0UDCTr (0x0 << 4)
52#define DDAR_Ser0UDCRc (0x1 << 4)
53#define DDAR_Ser1SDLCTr (0x2 << 4)
54#define DDAR_Ser1SDLCRc (0x3 << 4)
55#define DDAR_Ser1UARTTr (0x4 << 4)
56#define DDAR_Ser1UARTRc (0x5 << 4)
57#define DDAR_Ser2ICPTr (0x6 << 4)
58#define DDAR_Ser2ICPRc (0x7 << 4)
59#define DDAR_Ser3UARTTr (0x8 << 4)
60#define DDAR_Ser3UARTRc (0x9 << 4)
61#define DDAR_Ser4MCP0Tr (0xa << 4)
62#define DDAR_Ser4MCP0Rc (0xb << 4)
63#define DDAR_Ser4MCP1Tr (0xc << 4)
64#define DDAR_Ser4MCP1Rc (0xd << 4)
65#define DDAR_Ser4SSPTr (0xe << 4)
66#define DDAR_Ser4SSPRc (0xf << 4)
67
/*
 * One hardware-sized transfer chunk: bus address and byte length.
 * Lengths are clamped to DMA_MAX_SIZE by the prep functions.
 */
struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};
72
73struct sa11x0_dma_desc {
74 struct virt_dma_desc vd;
75
76 u32 ddar;
77 size_t size;
78 unsigned period;
79 bool cyclic;
80
81 unsigned sglen;
82 struct sa11x0_dma_sg sg[0];
83};
84
85struct sa11x0_dma_phy;
86
/* A virtual DMA channel, one per device request line in chan_desc[]. */
struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy *phy;	/* bound physical channel, NULL if none */
	enum dma_status status;

	/* protected by d->lock */
	struct list_head node;		/* entry on d->chan_pending */

	u32 ddar;			/* cached DDAR value for this channel */
	const char *name;		/* matched by sa11x0_dma_filter_fn() */
};
100
/* One of the NR_PHY_CHAN physical DMA channels in the SA-11x0. */
struct sa11x0_dma_phy {
	void __iomem		*base;	/* this channel's register window */
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;	/* owning vchan, NULL when free */

	/* Protected by c->vc.lock */
	unsigned		sg_load;	/* next sg index of txd_load to program */
	struct sa11x0_dma_desc	*txd_load;	/* descriptor being fed into hardware */
	unsigned		sg_done;	/* completed sg entries of txd_done */
	struct sa11x0_dma_desc	*txd_done;	/* descriptor currently completing */
	/* Register shadows saved/restored across suspend/resume */
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};
117
/* Driver state for the whole DMA controller. */
struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;	/* protects chan_pending and phy[].vchan */
	struct tasklet_struct	task;	/* pairs free pchans with pending vchans */
	struct list_head	chan_pending;	/* vchans waiting for a pchan */
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
126
127static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
128{
129 return container_of(chan, struct sa11x0_dma_chan, vc.chan);
130}
131
132static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
133{
134 return container_of(dmadev, struct sa11x0_dma_dev, slave);
135}
136
137static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
138{
139 struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
140
141 return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
142}
143
144static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
145{
146 kfree(container_of(vd, struct sa11x0_dma_desc, vd));
147}
148
/*
 * Dequeue @txd from its virtual channel and make it the descriptor
 * being loaded onto physical channel @p.  Called with the vchan lock
 * held.
 */
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
158
/*
 * Load the next sg entry of the descriptor being loaded into whichever
 * of the two hardware buffer register sets (A or B) is idle, and start
 * it.  Called with the vchan lock held.
 */
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	/* Nothing is being loaded on this physical channel. */
	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Both buffer sets already started: nothing can be loaded now. */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Chain straight into the next issued one, but only
			 * if it uses the same DDAR value -- the device
			 * address register is shared by both buffer sets.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic transfer: wrap back to the first entry. */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/*
	 * Select the idle buffer set.  NOTE(review): this decode uses
	 * DCSR_BIU to tell which set the hardware is currently using --
	 * confirm against the SA-11x0 developer's manual.
	 */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}
222
/*
 * Account for one completed sg entry on physical channel @p.  When the
 * whole descriptor has finished, either signal cookie completion
 * (slave sg) or fire the period callback (cyclic), then try to load
 * the next sg entry.  Called from IRQ context with the vchan lock held.
 */
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			/* Nothing chained: let the tasklet find more work. */
			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic descriptors never complete; start over. */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}
248
/*
 * Per physical-channel interrupt handler: acknowledge error/done
 * status and advance the buffer queue for the owning virtual channel.
 */
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear only the status bits we are about to handle. */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Re-check under the lock that this vchan still owns the
		 * physical channel before completing transfers -- the
		 * binding may have been dropped in the meantime.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}
299
/*
 * Begin executing the next issued descriptor on the physical channel
 * bound to vchan @c.  Called with the vchan lock held.
 */
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, there is nothing to start. */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started. */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
			(DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR. */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to fill both hardware buffer sets. */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}
326
/*
 * Scheduler tasklet: releases physical channels that ran out of work
 * and hands free physical channels to pending virtual channels.
 */
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	/* Pass 1: feed idle physical channels, or mark them free. */
	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* Pass 2: assign free physical channels to pending vchans. */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	/* Pass 3: start work on the freshly paired channels. */
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}
387
388
389static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
390{
391 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
392 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
393 unsigned long flags;
394
395 spin_lock_irqsave(&d->lock, flags);
396 list_del_init(&c->node);
397 spin_unlock_irqrestore(&d->lock, flags);
398
399 vchan_free_chan_resources(&c->vc);
400}
401
/*
 * Return the hardware's current transfer address for channel @p,
 * reading whichever buffer set (A or B) is currently active.
 * NOTE(review): the DCSR_BIU/STRT decode mirrors the selection logic
 * in sa11x0_dma_start_sg() -- confirm against the SA-11x0 manual.
 */
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}
417
/*
 * Report the status of a given cookie, computing the residue (bytes
 * still to be transferred) when the caller supplied @state.
 */
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is still on our issue queue, the transfer has
	 * not started and the residue is the descriptor's full size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		/* Is the cookie one of the descriptors on the hardware? */
		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			/* Find the sg entry containing the current position. */
			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			/* Every later sg entry is wholly untransferred. */
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}
495
496
497
498
499
500
/*
 * Move pending txds to the issued list.  If the channel is not bound
 * to a physical channel and not already pending, add it to the list
 * of pending channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
522
/*
 * Build a slave scatter-gather descriptor.  Entries longer than the
 * hardware maximum are split into roughly equal, alignment-preserving
 * chunks.  Returns NULL on bad direction, bad alignment, empty
 * scatterlist or allocation failure.
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction. */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds. */
	if (sglen == 0)
		return NULL;

	/* First pass: check alignment and count the hw chunks needed. */
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	/* Second pass: fill in the chunk list. */
	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}
604
/*
 * Build a cyclic descriptor: @size bytes divided into size/period
 * periods, each period further split into hardware-sized chunks.
 * A period callback fires after every @sgperiod chunks complete.
 */
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction. */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Hardware chunks per period, and across the whole buffer. */
	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds. */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			/* Split over-size chunks, preserving alignment. */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
663
/*
 * Apply a dma_slave_config: validate the bus width (1 or 2 bytes) and
 * burst size (4 or 8), then fold the device address and width/burst
 * flags into the cached DDAR value used by subsequent preps.
 */
static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	/* Keep only the device selector and direction bits. */
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	/* Pick the config fields matching the channel's direction. */
	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	/*
	 * NOTE(review): the device address is packed into DDAR as the
	 * top nibble plus bits 2..21 shifted up by 6 -- confirm this
	 * encoding against the SA-11x0 developer's manual.
	 */
	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}
700
701static int sa11x0_dma_device_pause(struct dma_chan *chan)
702{
703 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
704 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
705 struct sa11x0_dma_phy *p;
706 LIST_HEAD(head);
707 unsigned long flags;
708
709 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
710 spin_lock_irqsave(&c->vc.lock, flags);
711 if (c->status == DMA_IN_PROGRESS) {
712 c->status = DMA_PAUSED;
713
714 p = c->phy;
715 if (p) {
716 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
717 } else {
718 spin_lock(&d->lock);
719 list_del_init(&c->node);
720 spin_unlock(&d->lock);
721 }
722 }
723 spin_unlock_irqrestore(&c->vc.lock, flags);
724
725 return 0;
726}
727
728static int sa11x0_dma_device_resume(struct dma_chan *chan)
729{
730 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
731 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
732 struct sa11x0_dma_phy *p;
733 LIST_HEAD(head);
734 unsigned long flags;
735
736 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
737 spin_lock_irqsave(&c->vc.lock, flags);
738 if (c->status == DMA_PAUSED) {
739 c->status = DMA_IN_PROGRESS;
740
741 p = c->phy;
742 if (p) {
743 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
744 } else if (!list_empty(&c->vc.desc_issued)) {
745 spin_lock(&d->lock);
746 list_add_tail(&c->node, &d->chan_pending);
747 spin_unlock(&d->lock);
748 }
749 }
750 spin_unlock_irqrestore(&c->vc.lock, flags);
751
752 return 0;
753}
754
/*
 * Abort everything on the channel: stop the hardware, detach from the
 * physical channel, and free every queued and in-flight descriptor.
 */
static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);

		/* Stop the channel and clear all start/done status bits. */
		writel(DCSR_RUN | DCSR_IE |
			DCSR_STRTA | DCSR_DONEA |
			DCSR_STRTB | DCSR_DONEB,
			p->base + DMA_DCSR_C);

		/* Collect the on-hardware descriptors for freeing too. */
		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		/* Release the physical channel; the tasklet may reuse it. */
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
797
/* Static description of one DMA-capable device request line. */
struct sa11x0_dma_channel_desc {
	u32 ddar;		/* preset DDAR device/direction bits */
	const char *name;	/* matched by sa11x0_dma_filter_fn() */
};
802
/*
 * CD() builds a channel descriptor from a DDAR_xxx device selector
 * plus the direction flag, naming the channel after the selector.
 */
#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
822
/* dmaengine slave map: lets client drivers find channels by name. */
static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};
829
830static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
831 struct device *dev)
832{
833 unsigned i;
834
835 INIT_LIST_HEAD(&dmadev->channels);
836 dmadev->dev = dev;
837 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
838 dmadev->device_config = sa11x0_dma_device_config;
839 dmadev->device_pause = sa11x0_dma_device_pause;
840 dmadev->device_resume = sa11x0_dma_device_resume;
841 dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
842 dmadev->device_tx_status = sa11x0_dma_tx_status;
843 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
844
845 for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
846 struct sa11x0_dma_chan *c;
847
848 c = kzalloc(sizeof(*c), GFP_KERNEL);
849 if (!c) {
850 dev_err(dev, "no memory for channel %u\n", i);
851 return -ENOMEM;
852 }
853
854 c->status = DMA_IN_PROGRESS;
855 c->ddar = chan_desc[i].ddar;
856 c->name = chan_desc[i].name;
857 INIT_LIST_HEAD(&c->node);
858
859 c->vc.desc_free = sa11x0_dma_free_desc;
860 vchan_init(&c->vc, dmadev);
861 }
862
863 return dma_async_device_register(dmadev);
864}
865
866static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
867 void *data)
868{
869 int irq = platform_get_irq(pdev, nr);
870
871 if (irq <= 0)
872 return -ENXIO;
873
874 return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
875}
876
/* Release the @nr'th platform IRQ previously claimed for @data. */
static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return;

	free_irq(irq, data);
}
884
/*
 * Tear down every virtual channel created by sa11x0_dma_init_dmadev().
 */
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		/* Ensure the vchan tasklet has finished before freeing. */
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
895
/*
 * Platform probe: map the controller registers, reset each physical
 * channel, claim its interrupt, then register the slave dma_device.
 */
static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	/* Allow clients to look up channels by name via the slave map. */
	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	/* Reset each physical channel and claim its interrupt. */
	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			/* Release the IRQs acquired so far, newest first. */
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}
981
/* Platform remove: undo everything done by sa11x0_dma_probe(). */
static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	/* Unregister first so no new channels can be requested. */
	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}
998
/*
 * Save the state of each physical channel.  A running channel is
 * stopped first; the buffer register pairs are saved so that
 * dbs/dbt[0] is the set to restart from, letting resume always reload
 * set A first regardless of which set was active at suspend time.
 * NOTE(review): assumes DCSR_BIU indicates buffer B is active --
 * confirm against the SA-11x0 developer's manual.
 */
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			/* Stop the channel, then re-read its status. */
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			/* Buffer B active: save B first and swap STRT bits. */
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		/* Leave the channel with no pending starts. */
		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}
1036
/*
 * Restore the state saved by sa11x0_dma_suspend() for every physical
 * channel that still has a descriptor attached.
 */
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		/* The channel must come out of suspend fully stopped. */
		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		/* Recover the DDAR from whichever descriptor was active. */
		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
1068
/*
 * All PM transitions use the same save/restore pair in the noirq
 * phase, after device interrupts have been disabled.
 */
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};
1077
/* Platform driver glue; also matched by sa11x0_dma_filter_fn(). */
static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name = "sa11x0-dma",
		.pm = &sa11x0_dma_pm_ops,
	},
	.probe = sa11x0_dma_probe,
	.remove = sa11x0_dma_remove,
};
1086
1087bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
1088{
1089 if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
1090 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
1091 const char *p = param;
1092
1093 return !strcmp(c->name, p);
1094 }
1095 return false;
1096}
1097EXPORT_SYMBOL(sa11x0_dma_filter_fn);
1098
/* Registered at subsys_initcall time so DMA is available before
 * client drivers probe. */
static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);
1104
/* Module unload: unregister the platform driver. */
static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);
1110
1111MODULE_AUTHOR("Russell King");
1112MODULE_DESCRIPTION("SA-11x0 DMA driver");
1113MODULE_LICENSE("GPL v2");
1114MODULE_ALIAS("platform:sa11x0-dma");
1115