/*
 * Xilinx AXI DMA Engine support for the APF (xlnk) framework
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#include <linux/dma-buf.h>

#include <linux/of.h>
#include <linux/irq.h>
#include <linux/of_irq.h>

#include "xilinx-dma-apf.h"

#include "xlnk.h"

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);

#define DMA_OUT_64(addr, val)	(writeq(val, addr))
#define DMA_OUT(addr, val)	(iowrite32(val, addr))
#define DMA_IN(addr)		(ioread32(addr))

#define GET_LOW(x)		((u32)((x) & 0xFFFFFFFF))
#define GET_HI(x)		((u32)((x) / 0x100000000))

static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);

static void xdma_clean_bd(struct xdma_desc_hw *bd)
{
	bd->src_addr = 0x0;
	bd->control = 0x0;
	bd->status = 0x0;
	bd->app[0] = 0x0;
	bd->app[1] = 0x0;
	bd->app[2] = 0x0;
	bd->app[3] = 0x0;
	bd->app[4] = 0x0;
	bd->dmahead = 0x0;
	bd->sw_flag = 0x0;
}

static int dma_is_running(struct xdma_chan *chan)
{
	return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
		(DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
}

static int dma_is_idle(struct xdma_chan *chan)
{
	return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
}

static void dma_halt(struct xdma_chan *chan)
{
	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
}

static void dma_start(struct xdma_chan *chan)
{
	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
}

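/*
 * dma_init - soft-reset the DMA channel.
 *
 * Sets the reset bit in the control register and spins, bounded by
 * XDMA_RESET_LOOP, until the hardware clears it.  Returns 0 on success,
 * non-zero if the reset did not complete.
 */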
static int dma_init(struct xdma_chan *chan)
{
	int loop = XDMA_RESET_LOOP;

	DMA_OUT(&chan->regs->cr,
		(DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));

	/* Wait for the hardware to finish the reset */
	while (loop) {
		if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
			break;

		loop -= 1;
	}

	if (!loop)
		return 1;

	return 0;
}

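/*
 * xdma_alloc_chan_descriptors - allocate the hardware descriptor ring.
 *
 * Allocates a single coherent block holding XDMA_MAX_BD_CNT descriptors and
 * links each descriptor's next_desc to the following one, wrapping at the
 * end so the BDs form a circular chain.
 */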
static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
{
	int i;
	u8 *ptr;

	ptr = (u8 *)dma_alloc_coherent(chan->dev,
				       (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
				       &chan->bd_phys_addr,
				       GFP_KERNEL);

	if (!ptr) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));
	chan->bd_cur = 0;
	chan->bd_tail = 0;
	chan->bd_used = 0;
	chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;

	for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
		chan->bds[i] = (struct xdma_desc_hw *)
			(ptr + (sizeof(struct xdma_desc_hw) * i));
		chan->bds[i]->next_desc = chan->bd_phys_addr +
			(sizeof(struct xdma_desc_hw) *
			 ((i + 1) % XDMA_MAX_BD_CNT));
	}

	return 0;
}

static void xdma_free_chan_resources(struct xdma_chan *chan)
{
	dev_dbg(chan->dev, "Free all channel resources.\n");
	dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
			  XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
}

static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
{
	struct xdma_desc_hw *desc;
	unsigned int start, end;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	start = 0;
	end = XDMA_MAX_BD_CNT;

	while (start < end) {
		desc = chan->bds[start];
		xdma_clean_bd(desc);
		start++;
	}

	chan->bd_cur = 0;
	chan->bd_tail = 0;
	chan->bd_used = 0;
	spin_unlock_irqrestore(&chan->lock, flags);
}

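/*
 * xilinx_chan_desc_cleanup - reclaim completed descriptors.
 *
 * Walks the ring from bd_cur and, for every descriptor the hardware has
 * completed, copies back any application words, signals the waiter's
 * completion (or marks it done in poll mode) and returns the BD to the
 * free pool.  A completed descriptor without an attached xdma_head is
 * treated as a premature EOF and triggers a channel reset.
 */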
static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
{
	struct xdma_head *dmahead;
	struct xdma_desc_hw *desc;
	struct completion *cmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
#define XDMA_BD_STS_RXEOF_MASK 0x04000000
	desc = chan->bds[chan->bd_cur];
	while (desc->status & XDMA_BD_STS_ALL_MASK) {
		if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
		    !(desc->dmahead)) {
			pr_info("ERROR: premature EOF on DMA\n");
			dma_init(chan);
			while (!(desc->dmahead)) {
				xdma_clean_bd(desc);
				chan->bd_used--;
				chan->bd_cur++;
				if (chan->bd_cur >= XDMA_MAX_BD_CNT)
					chan->bd_cur = 0;
				desc = chan->bds[chan->bd_cur];
			}
		}
		if (desc->dmahead) {
			if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
				if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))
					break;

			dmahead = (struct xdma_head *)desc->dmahead;
			cmp = (struct completion *)&dmahead->cmp;
			if (dmahead->nappwords_o)
				memcpy(dmahead->appwords_o, desc->app,
				       dmahead->nappwords_o * sizeof(u32));

			if (chan->poll_mode)
				cmp->done = 1;
			else
				complete(cmp);
		}
		xdma_clean_bd(desc);
		chan->bd_used--;
		chan->bd_cur++;
		if (chan->bd_cur >= XDMA_MAX_BD_CNT)
			chan->bd_cur = 0;
		desc = chan->bds[chan->bd_cur];
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void xdma_err_tasklet(unsigned long data)
{
	struct xdma_chan *chan = (struct xdma_chan *)data;

	if (chan->err) {
		/* Try to recover by resetting the channel */
		if (!dma_init(chan))
			chan->err = 0;
		else
			dev_err(chan->dev, "DMA channel reset failed, please reset system\n");
	}

	/* Barrier so descriptor updates from hardware are seen before cleanup */
	rmb();
	xilinx_chan_desc_cleanup(chan);

	xilinx_chan_desc_reinit(chan);
}

static void xdma_tasklet(unsigned long data)
{
	struct xdma_chan *chan = (struct xdma_chan *)data;

	xilinx_chan_desc_cleanup(chan);
}

static void dump_cur_bd(struct xdma_chan *chan)
{
	u32 index;

	index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
		sizeof(struct xdma_desc_hw);

	dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
	dev_err(chan->dev, " buf = %p\n",
		(void *)chan->bds[index]->src_addr);
	dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
	dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
	dev_err(chan->dev, " next = %p\n",
		(void *)chan->bds[index]->next_desc);
}

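/*
 * Interrupt handlers for the RX (S2MM) and TX (MM2S) channels.  Both do the
 * same work: acknowledge the asserted interrupt bits, report and flag DMA
 * errors for the error tasklet, and schedule the cleanup tasklet on
 * delay/complete interrupts when the channel is not in poll mode.
 */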
static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
{
	struct xdma_chan *chan = data;
	u32 stat;

	stat = DMA_IN(&chan->regs->sr);

	if (!(stat & XDMA_XR_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));

	if (stat & XDMA_XR_IRQ_ERROR_MASK) {
		dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
			chan->name, (unsigned int)stat,
			(unsigned int)DMA_IN(&chan->regs->cdr),
			(unsigned int)DMA_IN(&chan->regs->tdr));

		dump_cur_bd(chan);

		chan->err = 1;
		tasklet_schedule(&chan->dma_err_tasklet);
	}

	if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
				   (stat & XDMA_XR_IRQ_IOC_MASK)))
		tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
{
	struct xdma_chan *chan = data;
	u32 stat;

	stat = DMA_IN(&chan->regs->sr);

	if (!(stat & XDMA_XR_IRQ_ALL_MASK))
		return IRQ_NONE;

	/* Ack the interrupts */
	DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));

	if (stat & XDMA_XR_IRQ_ERROR_MASK) {
		dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
			chan->name, (unsigned int)stat,
			(unsigned int)DMA_IN(&chan->regs->cdr),
			(unsigned int)DMA_IN(&chan->regs->tdr));

		dump_cur_bd(chan);

		chan->err = 1;
		tasklet_schedule(&chan->dma_err_tasklet);
	}

	if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
				   (stat & XDMA_XR_IRQ_IOC_MASK)))
		tasklet_schedule(&chan->tasklet);

	return IRQ_HANDLED;
}

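/*
 * xdma_start_transfer - hand a BD chain to the hardware.
 *
 * If the channel is already running (or idle) only the tail pointer is
 * updated; otherwise the current-descriptor register is programmed, the
 * channel is started, interrupts are enabled (errors only in poll mode)
 * and the tail pointer write kicks off the transfer.
 */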
static void xdma_start_transfer(struct xdma_chan *chan,
				int start_index,
				int end_index)
{
	xlnk_intptr_type cur_phys;
	xlnk_intptr_type tail_phys;
	u32 regval;

	if (chan->err)
		return;

	cur_phys = chan->bd_phys_addr + (start_index *
					 sizeof(struct xdma_desc_hw));
	tail_phys = chan->bd_phys_addr + (end_index *
					  sizeof(struct xdma_desc_hw));

	if (dma_is_running(chan) || dma_is_idle(chan)) {
#if XLNK_SYS_BIT_WIDTH == 32
		DMA_OUT(&chan->regs->tdr, tail_phys);
#else
		DMA_OUT_64(&chan->regs->tdr, tail_phys);
#endif
		return;
	}

#if XLNK_SYS_BIT_WIDTH == 32
	DMA_OUT(&chan->regs->cdr, cur_phys);
#else
	DMA_OUT_64(&chan->regs->cdr, cur_phys);
#endif

	dma_start(chan);

	/* Enable interrupts (errors only when polling) */
	regval = DMA_IN(&chan->regs->cr);
	regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
				   : XDMA_XR_IRQ_ALL_MASK);
	DMA_OUT(&chan->regs->cr, regval);

	/* Update the tail pointer and start the transfer */
#if XLNK_SYS_BIT_WIDTH == 32
	DMA_OUT(&chan->regs->tdr, tail_phys);
#else
	DMA_OUT_64(&chan->regs->tdr, tail_phys);
#endif
}

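/*
 * xdma_setup_hw_desc - build hardware descriptors for a scatterlist.
 *
 * Splits each scatterlist entry into chunks of at most chan->max_len,
 * fills one BD per chunk, marks SOP/EOP for DMA_TO_DEVICE transfers,
 * attaches the xdma_head to the last BD and then starts the transfer.
 * On failure the partially built chain is cleaned up and the tail index
 * is rolled back.
 */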
static int xdma_setup_hw_desc(struct xdma_chan *chan,
			      struct xdma_head *dmahead,
			      struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_data_direction direction,
			      unsigned int nappwords_i,
			      u32 *appwords_i)
{
	struct xdma_desc_hw *bd = NULL;
	size_t copy;
	struct scatterlist *sg;
	size_t sg_used;
	dma_addr_t dma_src;
	int i, start_index = -1, end_index1 = 0, end_index2 = -1;
	int status;
	unsigned long flags;
	unsigned int bd_used_saved;

	if (!chan) {
		pr_err("Requested transfer on invalid channel\n");
		return -ENODEV;
	}

	/* If we are about to run out of free BDs, try to recycle some */
	if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
		xilinx_chan_desc_cleanup(chan);

	spin_lock_irqsave(&chan->lock, flags);

	bd_used_saved = chan->bd_used;

	/* Build transactions from the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			/* Take the next BD in the ring */
			bd = chan->bds[chan->bd_tail];
			if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
				end_index2 = chan->bd_tail;
				status = -ENOMEM;
				/*
				 * If no BD was claimed yet there is nothing
				 * to roll back; otherwise clean up the
				 * partial chain.
				 */
				if (start_index == -1)
					goto out_unlock;
				else
					goto out_clean;
			}

			/*
			 * Transfer at most max_len bytes per BD, the DMA
			 * controller limit.
			 */
			copy = min((size_t)(sg_dma_len(sg) - sg_used),
				   (size_t)chan->max_len);

			dma_src = sg_dma_address(sg) + sg_used;
			bd->src_addr = dma_src;

			/* Fill in the descriptor */
			bd->control = copy;

			/*
			 * For the first BD of the transfer, copy in the
			 * application words and, for DMA_TO_DEVICE, mark
			 * the start of the packet.
			 */
			if (start_index == -1) {
				start_index = chan->bd_tail;

				if (nappwords_i)
					memcpy(bd->app, appwords_i,
					       nappwords_i * sizeof(u32));

				if (direction == DMA_TO_DEVICE)
					bd->control |= XDMA_BD_SOP;
			}

			sg_used += copy;
			end_index2 = chan->bd_tail;
			chan->bd_tail++;
			chan->bd_used++;
			if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
				end_index1 = XDMA_MAX_BD_CNT;
				chan->bd_tail = 0;
			}
		}
	}

	if (start_index == -1) {
		status = -EINVAL;
		goto out_unlock;
	}

	bd->dmahead = (xlnk_intptr_type)dmahead;
	bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
	dmahead->last_bd_index = end_index2;

	if (direction == DMA_TO_DEVICE)
		bd->control |= XDMA_BD_EOP;

	/* Make sure the BD writes are visible before starting the DMA */
	wmb();

	xdma_start_transfer(chan, start_index, end_index2);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;

out_clean:
	if (!end_index1) {
		for (i = start_index; i < end_index2; i++)
			xdma_clean_bd(chan->bds[i]);
	} else {
		/* Clean up to the end of the ring, then wrap around */
		for (i = start_index; i < end_index1; i++)
			xdma_clean_bd(chan->bds[i]);

		end_index1 = 0;
		for (i = end_index1; i < end_index2; i++)
			xdma_clean_bd(chan->bds[i]);
	}

	chan->bd_tail = start_index;
	chan->bd_used = bd_used_saved;

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);

	return status;
}

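/*
 * phy_buf_to_sgl - build a scatterlist for a physically contiguous buffer.
 *
 * Splits the buffer into XDMA_MAX_TRANS_LEN sized entries and returns the
 * number of entries created, or 0 on error (bad arguments or more entries
 * than XDMA_MAX_BD_CNT).
 */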
static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
				   unsigned int phy_buf_len,
				   struct scatterlist *sgl)
{
	unsigned int sgl_cnt = 0;
	struct scatterlist *sgl_head;
	unsigned int dma_len;
	unsigned int num_bd;

	if (!phy_buf || !phy_buf_len) {
		pr_err("phy_buf is NULL or phy_buf_len = 0\n");
		return sgl_cnt;
	}

	num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
		/ XDMA_MAX_TRANS_LEN;
	sgl_head = sgl;
	sg_init_table(sgl, num_bd);

	while (phy_buf_len > 0) {
		xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
		unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);

		sgl_cnt++;
		if (sgl_cnt > XDMA_MAX_BD_CNT)
			return 0;

		dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
			XDMA_MAX_TRANS_LEN : phy_buf_len;

		sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
		sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
		sg_dma_len(sgl_head) = dma_len;
		sgl_head = sg_next(sgl_head);

		phy_buf += dma_len;
		phy_buf_len -= dma_len;
	}

	return sgl_cnt;
}

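/*
 * sgl_merge - coalesce adjacent scatterlist entries.
 *
 * Merges runs of entries whose DMA addresses are contiguous, as long as the
 * merged length stays below XDMA_MAX_TRANS_LEN, and writes the result to
 * sgl_merged.  Returns the number of merged entries, or 0 if the result
 * would exceed XDMA_MAX_BD_CNT.
 */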
static unsigned int sgl_merge(struct scatterlist *sgl,
			      unsigned int sgl_len,
			      struct scatterlist *sgl_merged)
{
	struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
	unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
	unsigned int dma_len = 0;

	sg_init_table(sgl_merged, sgl_len);
	sg_merged_head = sgl_merged;
	sghead = sgl;

	while (sghead && (sg_visited_cnt < sgl_len)) {
		dma_len = sg_dma_len(sghead);
		sgend = sghead;
		sg_visited_cnt++;
		sgnext = sg_next(sgend);

		while (sgnext && (sg_visited_cnt < sgl_len)) {
			if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
			    sg_dma_address(sgnext))
				break;

			if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)
				break;

			sgend = sgnext;
			dma_len += sg_dma_len(sgend);
			sg_visited_cnt++;
			sgnext = sg_next(sgnext);
		}

		sg_merged_num++;
		if (sg_merged_num > XDMA_MAX_BD_CNT)
			return 0;

		memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));

		sg_dma_len(sg_merged_head) = dma_len;

		sg_merged_head = sg_next(sg_merged_head);
		sghead = sg_next(sgend);
	}

	return sg_merged_num;
}

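/*
 * pin_user_pages - pin a user buffer and describe it with a scatterlist.
 *
 * Pins the pages backing [uaddr, uaddr + ulen) with get_user_pages() and
 * builds a page-granular scatterlist for them.  On success the caller owns
 * the returned scatterlist and must release it with unpin_user_pages().
 */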
static int pin_user_pages(xlnk_intptr_type uaddr,
			  unsigned int ulen,
			  int write,
			  struct scatterlist **scatterpp,
			  unsigned int *cntp,
			  unsigned int user_flags)
{
	int status;
	struct mm_struct *mm = current->mm;
	unsigned int first_page;
	unsigned int last_page;
	unsigned int num_pages;
	struct scatterlist *sglist;
	struct page **mapped_pages;

	unsigned int pgidx;
	unsigned int pglen;
	unsigned int pgoff;
	unsigned int sublen;

	first_page = uaddr / PAGE_SIZE;
	last_page = (uaddr + ulen - 1) / PAGE_SIZE;
	num_pages = last_page - first_page + 1;
	mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);
	if (!mapped_pages)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	status = get_user_pages(uaddr, num_pages,
				(write ? FOLL_WRITE : 0) | FOLL_FORCE,
				mapped_pages, NULL);
	up_read(&mm->mmap_sem);

	if (status == num_pages) {
		sglist = kcalloc(num_pages,
				 sizeof(struct scatterlist),
				 GFP_KERNEL);
		if (!sglist) {
			pr_err("%s: kcalloc failed to create sg list\n",
			       __func__);
			vfree(mapped_pages);
			return -ENOMEM;
		}
		sg_init_table(sglist, num_pages);
		sublen = 0;
		for (pgidx = 0; pgidx < status; pgidx++) {
			if (pgidx == 0 && num_pages != 1) {
				pgoff = uaddr & (~PAGE_MASK);
				pglen = PAGE_SIZE - pgoff;
			} else if (pgidx == 0 && num_pages == 1) {
				pgoff = uaddr & (~PAGE_MASK);
				pglen = ulen;
			} else if (pgidx == num_pages - 1) {
				pgoff = 0;
				pglen = ulen - sublen;
			} else {
				pgoff = 0;
				pglen = PAGE_SIZE;
			}

			sublen += pglen;

			sg_set_page(&sglist[pgidx],
				    mapped_pages[pgidx],
				    pglen, pgoff);

			sg_dma_len(&sglist[pgidx]) = pglen;
		}

		*scatterpp = sglist;
		*cntp = num_pages;

		vfree(mapped_pages);
		return 0;
	}
	pr_err("Failed to pin user pages\n");
	/* get_user_pages() may return an error; only release pages it pinned */
	if (status > 0) {
		for (pgidx = 0; pgidx < status; pgidx++)
			put_page(mapped_pages[pgidx]);
	}
	vfree(mapped_pages);
	return -ENOMEM;
}

static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
{
	struct page *pg;
	unsigned int i;

	if (!sglist)
		return 0;

	for (i = 0; i < cnt; i++) {
		pg = sg_page(sglist + i);
		if (pg)
			put_page(pg);
	}

	kfree(sglist);
	return 0;
}

struct xdma_chan *xdma_request_channel(char *name)
{
	int i;
	struct xdma_device *device, *tmp;

	list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
		for (i = 0; i < device->channel_count; i++) {
			if (!strcmp(device->chan[i]->name, name))
				return device->chan[i];
		}
	}
	return NULL;
}
EXPORT_SYMBOL(xdma_request_channel);

void xdma_release_channel(struct xdma_chan *chan)
{ }
EXPORT_SYMBOL(xdma_release_channel);

void xdma_release_all_channels(void)
{
	int i;
	struct xdma_device *device, *tmp;

	list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
		for (i = 0; i < device->channel_count; i++) {
			if (device->chan[i]->client_count) {
				dma_halt(device->chan[i]);
				xilinx_chan_desc_reinit(device->chan[i]);
				pr_info("%s: chan %s freed\n",
					__func__,
					device->chan[i]->name);
			}
		}
	}
}
EXPORT_SYMBOL(xdma_release_all_channels);

static void xdma_release(struct device *dev)
{
}

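/*
 * xdma_submit - map a buffer and queue a DMA transfer on a channel.
 *
 * Builds a scatterlist for the buffer, which may be an already-mapped
 * dma-buf, a physically contiguous buffer, or ordinary user pages that are
 * pinned and merged first, maps it for DMA, and programs the hardware
 * descriptors.  On success *dmaheadpp describes the in-flight transfer and
 * must later be passed to xdma_wait().
 */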
int xdma_submit(struct xdma_chan *chan,
		xlnk_intptr_type userbuf,
		void *kaddr,
		unsigned int size,
		unsigned int nappwords_i,
		u32 *appwords_i,
		unsigned int nappwords_o,
		unsigned int user_flags,
		struct xdma_head **dmaheadpp,
		struct xlnk_dmabuf_reg *dp)
{
	struct xdma_head *dmahead;
	struct scatterlist *pagelist = NULL;
	struct scatterlist *sglist = NULL;
	unsigned int pagecnt = 0;
	unsigned int sgcnt = 0;
	enum dma_data_direction dmadir;
	int status;
	unsigned long attrs = 0;

	dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);
	if (!dmahead)
		return -ENOMEM;

	dmahead->chan = chan;
	dmahead->userbuf = userbuf;
	dmahead->size = size;
	dmahead->dmadir = chan->direction;
	dmahead->userflag = user_flags;
	dmahead->dmabuf = dp;
	dmadir = chan->direction;

	if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (dp) {
		int i;
		struct scatterlist *sg;
		unsigned int remaining_size = size;

		if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
			pr_err("%s dmabuf not mapped: %p\n",
			       __func__, dp->dbuf_sg_table);
			return -EINVAL;
		}
		if (dp->dbuf_sg_table->nents == 0) {
			pr_err("%s: cannot map a scatterlist with 0 entries\n",
			       __func__);
			return -EINVAL;
		}
		sglist = kmalloc_array(dp->dbuf_sg_table->nents,
				       sizeof(*sglist),
				       GFP_KERNEL);
		if (!sglist)
			return -ENOMEM;

		sg_init_table(sglist, dp->dbuf_sg_table->nents);
		sgcnt = 0;
		for_each_sg(dp->dbuf_sg_table->sgl,
			    sg,
			    dp->dbuf_sg_table->nents,
			    i) {
			sg_set_page(sglist + i,
				    sg_page(sg),
				    sg_dma_len(sg),
				    sg->offset);
			sg_dma_address(sglist + i) = sg_dma_address(sg);
			if (remaining_size == 0) {
				sg_dma_len(sglist + i) = 0;
			} else if (sg_dma_len(sg) > remaining_size) {
				sg_dma_len(sglist + i) = remaining_size;
				sgcnt++;
			} else {
				sg_dma_len(sglist + i) = sg_dma_len(sg);
				remaining_size -= sg_dma_len(sg);
				sgcnt++;
			}
		}
		dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
		pagelist = NULL;
		pagecnt = 0;
	} else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
		size_t elem_cnt;

		elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
		sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
		if (!sglist)
			return -ENOMEM;
		sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
		if (!sgcnt)
			return -ENOMEM;

		status = get_dma_ops(chan->dev)->map_sg(chan->dev,
							sglist,
							sgcnt,
							dmadir,
							attrs);
		if (!status) {
			pr_err("sg contiguous mapping failed\n");
			return -ENOMEM;
		}
		pagelist = NULL;
		pagecnt = 0;
	} else {
		status = pin_user_pages(userbuf,
					size,
					dmadir != DMA_TO_DEVICE,
					&pagelist,
					&pagecnt,
					user_flags);
		if (status < 0) {
			pr_err("pin_user_pages failed\n");
			return status;
		}

		status = get_dma_ops(chan->dev)->map_sg(chan->dev,
							pagelist,
							pagecnt,
							dmadir,
							attrs);
		if (!status) {
			pr_err("dma_map_sg failed\n");
			unpin_user_pages(pagelist, pagecnt);
			return -ENOMEM;
		}

		sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);
		if (sglist)
			sgcnt = sgl_merge(pagelist, pagecnt, sglist);
		if (!sgcnt) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
							 pagelist,
							 pagecnt,
							 dmadir,
							 attrs);
			unpin_user_pages(pagelist, pagecnt);
			kfree(sglist);
			return -ENOMEM;
		}
	}
	dmahead->sglist = sglist;
	dmahead->sgcnt = sgcnt;
	dmahead->pagelist = pagelist;
	dmahead->pagecnt = pagecnt;

	init_completion(&dmahead->cmp);

	if (nappwords_i > XDMA_MAX_APPWORDS)
		nappwords_i = XDMA_MAX_APPWORDS;

	if (nappwords_o > XDMA_MAX_APPWORDS)
		nappwords_o = XDMA_MAX_APPWORDS;

	dmahead->nappwords_o = nappwords_o;

	status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
				    dmadir, nappwords_i, appwords_i);
	if (status) {
		pr_err("setup hw desc failed\n");
		if (dmahead->pagelist) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
							 pagelist,
							 pagecnt,
							 dmadir,
							 attrs);
			unpin_user_pages(pagelist, pagecnt);
		} else if (!dp) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
							 sglist,
							 sgcnt,
							 dmadir,
							 attrs);
		}
		kfree(dmahead->sglist);
		return -ENOMEM;
	}

	*dmaheadpp = dmahead;
	return 0;
}
EXPORT_SYMBOL(xdma_submit);

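/*
 * xdma_wait - wait for a submitted transfer to finish.
 *
 * In poll mode the descriptor ring is reclaimed directly; otherwise this
 * waits (or try-waits, with XDMA_FLAGS_TRYWAIT) on the transfer completion.
 * Once complete, the buffer is unmapped and any pinned user pages are
 * released.
 */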
int xdma_wait(struct xdma_head *dmahead,
	      unsigned int user_flags,
	      unsigned int *operating_flags)
{
	struct xdma_chan *chan = dmahead->chan;
	unsigned long attrs = 0;

	if (chan->poll_mode) {
		xilinx_chan_desc_cleanup(chan);
		*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
	} else {
		if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
			if (!try_wait_for_completion(&dmahead->cmp))
				return 0;
			*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
		} else {
			wait_for_completion(&dmahead->cmp);
			*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
		}
	}

	if (!dmahead->dmabuf) {
		if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
			attrs |= DMA_ATTR_SKIP_CPU_SYNC;

		if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
							 dmahead->sglist,
							 dmahead->sgcnt,
							 dmahead->dmadir,
							 attrs);
		} else {
			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
							 dmahead->pagelist,
							 dmahead->pagecnt,
							 dmahead->dmadir,
							 attrs);
			unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);
		}
	}
	kfree(dmahead->sglist);

	return 0;
}
EXPORT_SYMBOL(xdma_wait);
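
/*
 * Typical use of the submit/wait API by an in-kernel client looks roughly
 * like the sketch below.  This is illustrative only: the channel name, the
 * user buffer address and the flags are assumptions, and error handling is
 * omitted.
 *
 *	struct xdma_chan *chan;
 *	struct xdma_head *head;
 *	unsigned int op_flags = 0;
 *	int status;
 *
 *	chan = xdma_request_channel("xilinx-axidma:0");
 *	status = xdma_submit(chan, user_addr, NULL, len,
 *			     0, NULL, 0,
 *			     CF_FLAG_CACHE_FLUSH_INVALIDATE,
 *			     &head, NULL);
 *	if (!status)
 *		xdma_wait(head, head->userflag, &op_flags);
 */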

int xdma_getconfig(struct xdma_chan *chan,
		   unsigned char *irq_thresh,
		   unsigned char *irq_delay)
{
	*irq_thresh = (DMA_IN(&chan->regs->cr) >> XDMA_COALESCE_SHIFT) & 0xff;
	*irq_delay = (DMA_IN(&chan->regs->cr) >> XDMA_DELAY_SHIFT) & 0xff;
	return 0;
}
EXPORT_SYMBOL(xdma_getconfig);

int xdma_setconfig(struct xdma_chan *chan,
		   unsigned char irq_thresh,
		   unsigned char irq_delay)
{
	unsigned long val;

	if (dma_is_running(chan))
		return -EBUSY;

	val = DMA_IN(&chan->regs->cr);
	val &= ~((0xff << XDMA_COALESCE_SHIFT) |
		 (0xff << XDMA_DELAY_SHIFT));
	val |= ((irq_thresh << XDMA_COALESCE_SHIFT) |
		(irq_delay << XDMA_DELAY_SHIFT));

	DMA_OUT(&chan->regs->cr, val);
	return 0;
}
EXPORT_SYMBOL(xdma_setconfig);

static const struct of_device_id gic_match[] = {
	{ .compatible = "arm,cortex-a9-gic", },
	{ .compatible = "arm,cortex-a15-gic", },
	{ },
};

static struct device_node *gic_node;

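/*
 * xlate_irq - translate a raw GIC hardware interrupt number to a Linux IRQ.
 *
 * Looks up the GIC node once and creates an OF mapping for the interrupt,
 * falling back to the hardware number if no mapping can be created.
 */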
unsigned int xlate_irq(unsigned int hwirq)
{
	struct of_phandle_args irq_data;
	unsigned int irq;

	if (!gic_node)
		gic_node = of_find_matching_node(NULL, gic_match);

	if (WARN_ON(!gic_node))
		return hwirq;

	irq_data.np = gic_node;
	irq_data.args_count = 3;
	irq_data.args[0] = 0;
#if XLNK_SYS_BIT_WIDTH == 32
	irq_data.args[1] = hwirq - 32;
#else
	irq_data.args[1] = hwirq;
#endif
	irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;

	irq = irq_create_of_mapping(&irq_data);
	if (WARN_ON(!irq))
		irq = hwirq;

	pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);

	return irq;
}

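/*
 * xdma_probe - set up one AXI DMA device described by platform data.
 *
 * Maps the register space and, for each configured channel, initializes the
 * channel state, tasklets, interrupt handler and descriptor ring, then
 * resets the engine and adds the device to the global list.
 */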
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_device *xdev;
	struct resource *res;
	int err, i, j;
	struct xdma_chan *chan;
	struct xdma_device_config *dma_config;
	int dma_chan_dir;
	int dma_chan_reg_offset;

	pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
		&pdev->dev, pdev->num_resources, pdev->id);

	xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;
	xdev->dev = &pdev->dev;

	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
	dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);

	dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
	if (dma_config->channel_count < 1 || dma_config->channel_count > 2)
		return -EFAULT;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xdev->regs)) {
		dev_err(&pdev->dev, "unable to iomap registers\n");
		return PTR_ERR(xdev->regs);
	}

	dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
		 pdev->id, &res->start);
	dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
		 pdev->id, &xdev->regs);

	dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
	for (i = 0; i < dma_config->channel_count; i++) {
		chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		dma_chan_dir = strcmp(dma_config->channel_config[i].type,
				      "axi-dma-mm2s-channel") ?
					DMA_FROM_DEVICE :
					DMA_TO_DEVICE;
		dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?
					0 :
					0x30;

		chan->id = i;
		chan->regs = xdev->regs + dma_chan_reg_offset;
		chan->dev = xdev->dev;
		chan->max_len = XDMA_MAX_TRANS_LEN;
		chan->direction = dma_chan_dir;
		sprintf(chan->name, "%s:%d", dma_config->name, chan->id);
		pr_info(" chan %d name: %s\n", chan->id, chan->name);
		pr_info(" chan %d direction: %s\n", chan->id,
			dma_chan_dir == DMA_FROM_DEVICE ?
				"FROM_DEVICE" : "TO_DEVICE");

		spin_lock_init(&chan->lock);
		tasklet_init(&chan->tasklet,
			     xdma_tasklet,
			     (unsigned long)chan);
		tasklet_init(&chan->dma_err_tasklet,
			     xdma_err_tasklet,
			     (unsigned long)chan);

		xdev->chan[chan->id] = chan;

		chan->irq = xlate_irq(dma_config->channel_config[i].irq);
		if (chan->irq <= 0) {
			pr_err("get_resource for IRQ for dev %d failed\n",
			       pdev->id);
			return -ENODEV;
		}

		err = devm_request_irq(&pdev->dev,
				       chan->irq,
				       dma_chan_dir == DMA_TO_DEVICE ?
					xdma_tx_intr_handler :
					xdma_rx_intr_handler,
				       IRQF_SHARED,
				       pdev->name,
				       chan);
		if (err) {
			dev_err(&pdev->dev, "unable to request IRQ\n");
			return err;
		}
		pr_info(" chan%d irq: %d\n", chan->id, chan->irq);

		chan->poll_mode = dma_config->channel_config[i].poll_mode;
		pr_info(" chan%d poll mode: %s\n",
			chan->id,
			chan->poll_mode ? "on" : "off");

		err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
		if (err) {
			dev_err(&pdev->dev, "unable to allocate BD's\n");
			return -ENOMEM;
		}
		pr_info(" chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
			chan->id,
			(void *)chan->bd_phys_addr,
			chan->bd_chain_size);

		err = dma_init(xdev->chan[chan->id]);
		if (err) {
			dev_err(&pdev->dev, "DMA init failed\n");

			for (j = 0; j <= i; j++)
				xdma_free_chan_resources(xdev->chan[j]);
			return -EIO;
		}
	}
	xdev->channel_count = dma_config->channel_count;
	pdev->dev.release = xdma_release;

	mutex_lock(&dma_list_mutex);
	list_add_tail(&xdev->node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	platform_set_drvdata(pdev, xdev);

	return 0;
}

static int xdma_remove(struct platform_device *pdev)
{
	int i;
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	mutex_lock(&dma_list_mutex);
	list_del(&xdev->node);
	mutex_unlock(&dma_list_mutex);

	for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
		if (xdev->chan[i])
			xdma_free_chan_resources(xdev->chan[i]);
	}

	return 0;
}

static struct platform_driver xdma_driver = {
	.probe = xdma_probe,
	.remove = xdma_remove,
	.driver = {
		.name = "xilinx-axidma",
	},
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("Xilinx DMA driver");
MODULE_LICENSE("GPL");