/*
 * Driver for the TXx9 SoC DMA Controller
 */

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	 channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	 dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT		0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep functions */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

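/*
 * Return a descriptor and all of its children to the channel's free
 * list.  The descriptor is expected to be off the active and queue
 * lists when this is called.
 */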
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
}

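/* Called with dc->lock held and bh disabled */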
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* Let the completion path restart the queue later */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a nonzero value to CHAR starts the transfer */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a nonzero value to CHAR starts the transfer */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			/* Without SMPCHN, set CHAR first, then start via CCR */
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

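/*
 * Complete a descriptor: mark its cookie complete, return it (and any
 * children) to the free list, then invoke the client callback and run
 * dependent transactions.
 */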
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so the callback can be invoked directly here.
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
	dma_run_dependencies(txd);
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

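/* Called with dc->lock held and bh disabled */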
static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

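/* Called with dc->lock held and bh disabled */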
static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked.  Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}

	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* chaining disabled: this is the last descriptor */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

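/*
 * Interrupt handling comes in two flavours: some chips provide one IRQ
 * per channel (handled by txx9dmac_chan_tasklet()/txx9dmac_chan_interrupt()
 * below), others a single shared IRQ for the whole controller (handled
 * by txx9dmac_tasklet()/txx9dmac_interrupt()).
 */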
static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;
	/* Re-enable the IRQ disabled in txx9dmac_chan_interrupt() */
	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupt for now; the tasklet does the
	 * real work and re-enables it when done.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;
	/* Re-enable the IRQ disabled in txx9dmac_interrupt() */
	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupt for now; the tasklet does the
	 * real work and re-enables it when done.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

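/*
 * tx_submit only puts the descriptor on the channel's software queue;
 * nothing is written to the hardware until the client calls
 * dma_async_issue_pending().
 */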
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}
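
/*
 * A typical dmaengine client drives the prep/submit/issue path below
 * roughly as follows (illustrative sketch only, not part of this
 * driver):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);	// -> txx9dmac_tx_submit()
 *	dma_async_issue_pending(chan);		// -> txx9dmac_issue_pending()
 */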
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Hardware erratum workaround: avoid transfer counts
		 * whose low bits fall in a problematic range by
		 * trimming the chunk slightly.
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit.  If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return DMA_COMPLETE;

	spin_lock_bh(&dc->lock);
	txx9dmac_scan_descriptors(dc);
	spin_unlock_bh(&dc->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

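/* Called with dc->lock held and bh disabled */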
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				 "only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

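/* Turn the controller off by clearing the master control register */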
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
}

static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata =
		dev_get_platdata(&pdev->dev);
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		/* Per-channel IRQ: handle it with a per-channel tasklet */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
			     (unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;	/* shared IRQ handled by the parent device */
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0) {
		devm_free_irq(&pdev->dev, dc->irq, dc);
		tasklet_kill(&dc->tasklet);
	}
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
			     (unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0) {
		devm_free_irq(&pdev->dev, ddev->irq, ddev);
		tasklet_kill(&ddev->tasklet);
	}
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
	struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove		= txx9dmac_chan_remove,
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= txx9dmac_remove,
	.shutdown	= txx9dmac_shutdown,
	.driver = {
		.name	= "txx9dmac",
		.pm	= &txx9dmac_dev_pm_ops,
	},
};

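/*
 * Register the controller driver before the channel driver: each
 * channel device looks up its parent controller's drvdata in
 * txx9dmac_chan_probe().
 */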
static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");