/*
 * Renesas SuperH DMA Engine support
 *
 * Exposes the on-chip SH DMA controller (DMAC) through the generic
 * dmaengine API, providing memcpy and slave (peripheral) transfers.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>
#include "shdma.h"

/* Link descriptor life-cycle states */
enum sh_dmae_desc_status {
	DESC_IDLE,		/* on the free list */
	DESC_PREPARED,		/* prepared, not yet submitted */
	DESC_SUBMITTED,		/* queued on ld_queue or running on the hardware */
	DESC_COMPLETED,		/* completed, callback still to be called */
	DESC_WAITING,		/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

/* Default MEMCPY transfer size = 2 ^ 2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/*
 * Global device list: writers take sh_dmae_lock, readers (the NMI
 * notifier below) traverse it under RCU.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* One bit per possible slave ID, enforcing exclusive slave usage */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}

/*
 * Stop the controller: clear the master-enable bit and the pending
 * NMI / address-error flags in the DMA Operation Register (DMAOR).
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true;

	return false;
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer;
	 * 0x400 selects auto-request mode.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR must not be changed while the channel is transferring */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

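/*
 * sh_dmae_tx_submit - assign a cookie to a prepared descriptor chain and
 * move all of its chunks from the free list onto the channel's ld_queue.
 * The client callback, if any, is attached to the last chunk of the chain.
 */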
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted and move them
	 * from the free list to the channel queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}


/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}


/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* Slave channel: release the slave ID */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

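/*
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length, decremented by the size of this chunk
 * @first:	if NULL, set to the current descriptor, whose cookie is set
 *		to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 *
 * Returns the new descriptor or NULL on error. Called under
 * spin_lock_bh(&sh_chan->desc_lock).
 */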
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

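/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */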
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are prepared in memory first, then flushed to chip
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

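/*
 * __ld_cleanup - walk ld_queue once, completing descriptors and invoking at
 * most one client callback per call. Returns the callback that was run (if
 * any), so the caller can loop until the queue has been fully drained.
 */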
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;

	if (all)
		/* Terminating - forgive uncompleted cookies */
		sh_chan->completed_cookie = sh_chan->common.cookie;
}

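/*
 * sh_chan_xfer_ld_queue - if the channel is idle, program the hardware with
 * the first still-submitted descriptor on ld_queue and start the transfer.
 */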
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we
	 * have to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

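/*
 * sh_dmae_reset - recovery path after an address error or NMI: stop the
 * controller, halt every channel, run the callbacks of all queued descriptors
 * and return them to the free list, then re-initialise DMAOR.
 */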
static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;

		if (!sh_chan)
			continue;

		/* Stop the channel */
		dmae_halt(sh_chan);

		/* Complete all queued descriptors */
		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	return IRQ_RETVAL(sh_dmae_reset(data));
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

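/*
 * sh_dmae_nmi_notify - check whether this controller has its NMIF flag set
 * and, if so, reset it; used by the NMI die-notifier below.
 */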
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	unsigned int handled;

	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	handled = sh_dmae_reset(shdev);
	if (handled)
		return true;

	return false;
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

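/*
 * sh_dmae_chan_probe - allocate and register one DMA channel: set up its
 * tasklet, default CHCR configuration, descriptor lists and per-channel IRQ.
 */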
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

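/*
 * sh_dmae_probe - map the controller and (optional) DMARS register windows,
 * reset the DMAC, pick up the error and per-channel IRQ resources, create the
 * channels and register the dmaengine device.
 */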
static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	unsigned long flags;
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	/*
	 * IRQ resources: the first IRQ resource is the error IRQ and may be
	 * shared with the channel IRQs; any further IRQ resources describe
	 * the per-channel interrupts, either one multiplexed IRQ for all
	 * channels or one or more ranges of individual channel IRQs.
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&sh_dmae_lock, flags);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;

	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all channels share one multiplexed IRQ */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channels */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irqsave(&sh_dmae_lock, flags);
	list_del_rcu(&shdev->node);
	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

1235
1236static int __exit sh_dmae_remove(struct platform_device *pdev)
1237{
1238 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1239 struct resource *res;
1240 unsigned long flags;
1241 int errirq = platform_get_irq(pdev, 0);
1242
1243 dma_async_device_unregister(&shdev->common);
1244
1245 if (errirq > 0)
1246 free_irq(errirq, shdev);
1247
1248 spin_lock_irqsave(&sh_dmae_lock, flags);
1249 list_del_rcu(&shdev->node);
1250 spin_unlock_irqrestore(&sh_dmae_lock, flags);
1251
1252
1253 sh_dmae_chan_remove(shdev);
1254
1255 pm_runtime_disable(&pdev->dev);
1256
1257 if (shdev->dmars)
1258 iounmap(shdev->dmars);
1259 iounmap(shdev->chan_reg);
1260
1261 kfree(shdev);
1262
1263 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1264 if (res)
1265 release_mem_region(res->start, resource_size(res));
1266 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1267 if (res)
1268 release_mem_region(res->start, resource_size(res));
1269
1270 return 0;
1271}
1272
1273static void sh_dmae_shutdown(struct platform_device *pdev)
1274{
1275 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1276 sh_dmae_ctl_stop(shdev);
1277}
1278
1279static struct platform_driver sh_dmae_driver = {
1280 .remove = __exit_p(sh_dmae_remove),
1281 .shutdown = sh_dmae_shutdown,
1282 .driver = {
1283 .owner = THIS_MODULE,
1284 .name = "sh-dma-engine",
1285 },
1286};
1287
1288static int __init sh_dmae_init(void)
1289{
1290
1291 int err = register_die_notifier(&sh_dmae_nmi_notifier);
1292 if (err)
1293 return err;
1294
1295 return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
1296}
1297module_init(sh_dmae_init);
1298
1299static void __exit sh_dmae_exit(void)
1300{
1301 platform_driver_unregister(&sh_dmae_driver);
1302
1303 unregister_die_notifier(&sh_dmae_nmi_notifier);
1304}
1305module_exit(sh_dmae_exit);
1306
1307MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1308MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1309MODULE_LICENSE("GPL");
1310MODULE_ALIAS("platform:sh-dma-engine");
1311