/*
 * xlnk.c
 *
 * Xilinx Accelerator (APF) driver support.
 *
 * Copyright (C) Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/dma-buf.h>

#include <linux/string.h>

#include <linux/uaccess.h>

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <linux/interrupt.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/cdev.h>

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/uio_driver.h>
#include <linux/semaphore.h>

#include "xlnk-ioctl.h"
#include "xlnk-sysdef.h"
#include "xlnk.h"

#ifdef CONFIG_XILINX_DMA_APF
#include "xilinx-dma-apf.h"
#endif

#define DRIVER_NAME "xlnk"
#define DRIVER_VERSION "0.2"

static struct platform_device *xlnk_pdev;
static struct device *xlnk_dev;

static struct cdev xlnk_cdev;

static struct class *xlnk_class;

static s32 driver_major;

static char *driver_name = DRIVER_NAME;

static void *xlnk_dev_buf;
static ssize_t xlnk_dev_size;
static int xlnk_dev_vmas;

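/*
 * Fixed-size buffer pool.  Slot 0 is reserved for xlnk_dev_buf, so a
 * pool index of 0 can double as a "not found" return value.  The
 * parallel arrays below are all guarded by xlnk_buf_lock.
 */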
#define XLNK_BUF_POOL_SIZE 4096
static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
static spinlock_t xlnk_buf_lock;

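/*
 * Pool of interrupt lines registered on behalf of userspace
 * accelerators, guarded by xlnk_irq_lock.
 */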
#define XLNK_IRQ_POOL_SIZE 256
static struct xlnk_irq_control *xlnk_irq_set[XLNK_IRQ_POOL_SIZE];
static spinlock_t xlnk_irq_lock;

static int xlnk_open(struct inode *ip, struct file *filp);
static int xlnk_release(struct inode *ip, struct file *filp);
static long xlnk_ioctl(struct file *filp, unsigned int code,
		       unsigned long args);
static ssize_t xlnk_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *offp);
static ssize_t xlnk_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offp);
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
static void xlnk_vma_open(struct vm_area_struct *vma);
static void xlnk_vma_close(struct vm_area_struct *vma);

static int xlnk_init_bufpool(void);
static void xlnk_init_irqpool(void);

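/* DMA-BUFs registered by userspace; the list is guarded by xlnk_buf_lock. */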
LIST_HEAD(xlnk_dmabuf_list);

static int xlnk_shutdown(unsigned long buf);
static int xlnk_recover_resource(unsigned long buf);

static const struct file_operations xlnk_fops = {
	.open = xlnk_open,
	.release = xlnk_release,
	.read = xlnk_read,
	.write = xlnk_write,
	.unlocked_ioctl = xlnk_ioctl,
	.mmap = xlnk_mmap,
};
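
/*
 * Illustrative userspace flow (types and ioctl names from the xlnk
 * uapi headers; error handling omitted):
 *
 *	int fd = open("/dev/xlnk", O_RDWR);
 *	args.allocbuf.len = len;
 *	args.allocbuf.cacheable = 1;
 *	ioctl(fd, XLNK_IOCALLOCBUF, &args);	// fills id and phyaddr
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, (off_t)args.allocbuf.id << 16);
 *
 * The (id << 16) offset encoding is what xlnk_mmap() below undoes
 * with vm_pgoff >> (16 - PAGE_SHIFT).
 */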

#define MAX_XLNK_DMAS 128

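/*
 * A device pack bundles everything needed to hot-register a platform
 * device (or UIO device) described by userspace: the device name, its
 * MMIO/IRQ resources and, for DMA engines, the APF channel config.
 * Packs are looked up and refcounted by base address.
 */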
struct xlnk_device_pack {
	char name[64];
	struct platform_device pdev;
	struct resource res[8];
	struct uio_info *io_ptr;
	int refs;

#ifdef CONFIG_XILINX_DMA_APF
	struct xdma_channel_config dma_chan_cfg[4];
	struct xdma_device_config dma_dev_cfg;
#endif
};

static struct semaphore xlnk_devpack_sem;
static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];

static void xlnk_devpacks_init(void)
{
	unsigned int i;

	sema_init(&xlnk_devpack_sem, 1);
	for (i = 0; i < MAX_XLNK_DMAS; i++)
		xlnk_devpacks[i] = NULL;
}

static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
{
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		if (!xlnk_devpacks[i]) {
			struct xlnk_device_pack *ret;

			ret = kzalloc(sizeof(*ret), GFP_KERNEL);
			if (!ret)
				return NULL;
			ret->pdev.id = i;
			xlnk_devpacks[i] = ret;

			return ret;
		}
	}

	return NULL;
}

/*
 * Unlink a pack from the table.  The caller owns the memory and must
 * free it once the platform device is gone; freeing here as well
 * would double-free with every caller.
 */
static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
{
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++)
		if (xlnk_devpacks[i] == devpack)
			xlnk_devpacks[i] = NULL;
}

static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
{
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		if (xlnk_devpacks[i] &&
		    xlnk_devpacks[i]->res[0].start == base)
			return xlnk_devpacks[i];
	}
	return NULL;
}

static void xlnk_devpacks_free(xlnk_intptr_type base)
{
	struct xlnk_device_pack *devpack;

	down(&xlnk_devpack_sem);
	devpack = xlnk_devpacks_find(base);
	if (!devpack) {
		up(&xlnk_devpack_sem);
		return;
	}
	devpack->refs--;
	if (devpack->refs) {
		up(&xlnk_devpack_sem);
		return;
	}
	platform_device_unregister(&devpack->pdev);
	xlnk_devpacks_delete(devpack);
	kfree(devpack);
	up(&xlnk_devpack_sem);
}

static void xlnk_devpacks_free_all(void)
{
	struct xlnk_device_pack *devpack;
	unsigned int i;

	for (i = 0; i < MAX_XLNK_DMAS; i++) {
		devpack = xlnk_devpacks[i];
		if (devpack) {
			if (devpack->io_ptr) {
				uio_unregister_device(devpack->io_ptr);
				kfree(devpack->io_ptr);
			} else {
				platform_device_unregister(&devpack->pdev);
			}
			xlnk_devpacks_delete(devpack);
			kfree(devpack);
		}
	}
}

static int xlnk_probe(struct platform_device *pdev)
{
	int err;
	dev_t dev = 0;

	xlnk_dev_buf = NULL;
	xlnk_dev_size = 0;
	xlnk_dev_vmas = 0;

	err = alloc_chrdev_region(&dev, 0, 1, driver_name);
	if (err) {
		dev_err(&pdev->dev, "%s: Can't get major %d\n",
			__func__, driver_major);
		goto err1;
	}

	cdev_init(&xlnk_cdev, &xlnk_fops);

	xlnk_cdev.owner = THIS_MODULE;

	err = cdev_add(&xlnk_cdev, dev, 1);
	if (err) {
		dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
			__func__);
		goto err2;
	}

	xlnk_class = class_create(THIS_MODULE, "xlnk");
	if (IS_ERR(xlnk_class)) {
		dev_err(&pdev->dev, "%s: Error creating xlnk class\n",
			__func__);
		err = PTR_ERR(xlnk_class);
		goto err3;
	}

	driver_major = MAJOR(dev);

	dev_info(&pdev->dev, "Major %d\n", driver_major);

	device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
		      NULL, "xlnk");

	err = xlnk_init_bufpool();
	if (err) {
		dev_err(&pdev->dev, "%s: Failed to allocate buffer pool\n",
			__func__);
		goto err3;
	}

	xlnk_init_irqpool();

	dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);

	xlnk_pdev = pdev;
	xlnk_dev = &pdev->dev;

	xlnk_devpacks_init();

	return 0;
err3:
	cdev_del(&xlnk_cdev);
err2:
	unregister_chrdev_region(dev, 1);
err1:
	return err;
}

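/*
 * Pool lookup helpers.  Each returns a pool index, or 0 when nothing
 * matches; slot 0 never holds a user buffer, so 0 is unambiguous.
 */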
static int xlnk_buf_findnull(void)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++) {
		if (!xlnk_bufpool[i])
			return i;
	}

	return 0;
}

static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++) {
		if (xlnk_bufpool[i] &&
		    xlnk_phyaddr[i] <= addr &&
		    xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
			return i;
	}

	return 0;
}

static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++) {
		if (xlnk_bufpool[i] &&
		    xlnk_buf_process[i] == pid &&
		    xlnk_userbuf[i] <= addr &&
		    xlnk_userbuf[i] + xlnk_buflen[i] > addr)
			return i;
	}

	return 0;
}

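/*
 * Allocate a DMA buffer and park it in the first free pool slot.
 * Cacheable buffers are allocated with DMA_ATTR_NON_CONSISTENT, so
 * userspace must pair them with the XLNK_IOCCACHECTRL/XLNK_IOCMEMOP
 * maintenance calls.  Returns the pool id (> 0) or -ENOMEM.
 */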
static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
{
	int id;
	void *kaddr;
	dma_addr_t phys_addr_anchor;
	unsigned long attrs;

	attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;

	kaddr = dma_alloc_attrs(xlnk_dev,
				len,
				&phys_addr_anchor,
				GFP_KERNEL | GFP_DMA,
				attrs);
	if (!kaddr)
		return -ENOMEM;

	spin_lock(&xlnk_buf_lock);
	id = xlnk_buf_findnull();
	if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
		xlnk_bufpool_alloc_point[id] = kaddr;
		xlnk_bufpool[id] = kaddr;
		xlnk_buflen[id] = len;
		xlnk_bufcacheable[id] = cacheable;
		xlnk_phyaddr[id] = phys_addr_anchor;
	}
	spin_unlock(&xlnk_buf_lock);

	if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
		/* No free pool slot; don't leak the allocation. */
		dma_free_attrs(xlnk_dev, len, kaddr, phys_addr_anchor, attrs);
		return -ENOMEM;
	}

	return id;
}

static int xlnk_init_bufpool(void)
{
	unsigned int i;

	spin_lock_init(&xlnk_buf_lock);
	xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
	if (!xlnk_dev_buf) {
		dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
		return -ENOMEM;
	}
	*((char *)xlnk_dev_buf) = '\0';

	xlnk_bufpool[0] = xlnk_dev_buf;
	for (i = 1; i < xlnk_bufpool_size; i++)
		xlnk_bufpool[i] = NULL;

	return 0;
}

static void xlnk_init_irqpool(void)
{
	int i;

	spin_lock_init(&xlnk_irq_lock);
	for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++)
		xlnk_irq_set[i] = NULL;
}

#define XLNK_SUSPEND NULL
#define XLNK_RESUME NULL

static int xlnk_remove(struct platform_device *pdev)
{
	dev_t devno;

	kfree(xlnk_dev_buf);
	xlnk_dev_buf = NULL;

	devno = MKDEV(driver_major, 0);
	cdev_del(&xlnk_cdev);
	unregister_chrdev_region(devno, 1);
	if (xlnk_class) {
		device_destroy(xlnk_class, MKDEV(driver_major, 0));
		class_destroy(xlnk_class);
	}

	xlnk_devpacks_free_all();

	return 0;
}

static const struct of_device_id xlnk_match[] = {
	{ .compatible = "xlnx,xlnk-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, xlnk_match);

static struct platform_driver xlnk_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = xlnk_match,
	},
	.probe = xlnk_probe,
	.remove = xlnk_remove,
	.suspend = XLNK_SUSPEND,
	.resume = XLNK_RESUME,
};

static u64 dma_mask = DMA_BIT_MASK(64);

static int xlnk_open(struct inode *ip, struct file *filp)
{
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		xlnk_dev_size = 0;

	return 0;
}

static ssize_t xlnk_read(struct file *filp,
			 char __user *buf,
			 size_t count,
			 loff_t *offp)
{
	ssize_t retval = 0;

	if (*offp >= xlnk_dev_size)
		goto out;

	if (*offp + count > xlnk_dev_size)
		count = xlnk_dev_size - *offp;

	if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
		retval = -EFAULT;
		goto out;
	}
	*offp += count;
	retval = count;

out:
	return retval;
}

static ssize_t xlnk_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offp)
{
	ssize_t retval = 0;

	/* xlnk_dev_buf is a fixed 8 KiB buffer; refuse to write past it. */
	if (*offp >= 8192)
		return -ENOSPC;
	if (count > 8192 - *offp)
		count = 8192 - *offp;

	if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
		retval = -EFAULT;
		goto out;
	}
	*offp += count;
	retval = count;

	if (xlnk_dev_size < *offp)
		xlnk_dev_size = *offp;

out:
	return retval;
}

static int xlnk_release(struct inode *ip, struct file *filp)
{
	return 0;
}

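/*
 * Register a generic accelerator as a platform device.  The irqs array
 * is zero-terminated with at most 7 entries, so everything fits in the
 * 8-entry resource table (1 MMIO region + up to 7 IRQs).  Registering
 * the same base address twice reuses and refcounts the existing pack.
 */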
static int xlnk_devregister(char *name,
			    unsigned int id,
			    xlnk_intptr_type base,
			    unsigned int size,
			    unsigned int *irqs,
			    xlnk_intptr_type *handle)
{
	unsigned int nres;
	unsigned int nirq;
	unsigned int *irqptr;
	struct xlnk_device_pack *devpack;
	unsigned int i;
	int status;

	down(&xlnk_devpack_sem);
	devpack = xlnk_devpacks_find(base);
	if (devpack) {
		*handle = (xlnk_intptr_type)devpack;
		devpack->refs++;
		status = 0;
	} else {
		nirq = 0;
		irqptr = irqs;

		while (*irqptr) {
			nirq++;
			irqptr++;
		}

		if (nirq > 7) {
			up(&xlnk_devpack_sem);
			return -ENOMEM;
		}

		nres = nirq + 1;

		devpack = xlnk_devpacks_alloc();
		if (!devpack) {
			up(&xlnk_devpack_sem);
			pr_err("Failed to allocate device %s\n", name);
			return -ENOMEM;
		}
		devpack->io_ptr = NULL;
		strlcpy(devpack->name, name, sizeof(devpack->name));
		devpack->pdev.name = devpack->name;

		devpack->pdev.dev.dma_mask = &dma_mask;
		devpack->pdev.dev.coherent_dma_mask = dma_mask;

		devpack->res[0].start = base;
		devpack->res[0].end = base + size - 1;
		devpack->res[0].flags = IORESOURCE_MEM;

		for (i = 0; i < nirq; i++) {
			devpack->res[i + 1].start = irqs[i];
			devpack->res[i + 1].end = irqs[i];
			devpack->res[i + 1].flags = IORESOURCE_IRQ;
		}

		devpack->pdev.resource = devpack->res;
		devpack->pdev.num_resources = nres;

		status = platform_device_register(&devpack->pdev);
		if (status) {
			xlnk_devpacks_delete(devpack);
			kfree(devpack);
			*handle = 0;
		} else {
			*handle = (xlnk_intptr_type)devpack;
		}
	}
	up(&xlnk_devpack_sem);

	return status;
}

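/*
 * Register an AXI DMA engine as an "xilinx-axidma" platform device with
 * one or two channels.  The channel direction selects between the
 * s2mm and mm2s channel types understood by the APF DMA driver.
 */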
static int xlnk_dmaregister(char *name,
			    unsigned int id,
			    xlnk_intptr_type base,
			    unsigned int size,
			    unsigned int chan_num,
			    unsigned int chan0_dir,
			    unsigned int chan0_irq,
			    unsigned int chan0_poll_mode,
			    unsigned int chan0_include_dre,
			    unsigned int chan0_data_width,
			    unsigned int chan1_dir,
			    unsigned int chan1_irq,
			    unsigned int chan1_poll_mode,
			    unsigned int chan1_include_dre,
			    unsigned int chan1_data_width,
			    xlnk_intptr_type *handle)
{
	int status = 0;

#ifdef CONFIG_XILINX_DMA_APF

	struct xlnk_device_pack *devpack;

	if (chan_num < 1 || chan_num > 2) {
		pr_err("%s: Expected either 1 or 2 channels, got %d\n",
		       __func__, chan_num);
		return -EINVAL;
	}

	down(&xlnk_devpack_sem);
	devpack = xlnk_devpacks_find(base);
	if (devpack) {
		*handle = (xlnk_intptr_type)devpack;
		devpack->refs++;
		status = 0;
	} else {
		devpack = xlnk_devpacks_alloc();
		if (!devpack) {
			up(&xlnk_devpack_sem);
			return -ENOMEM;
		}
		strlcpy(devpack->name, name, sizeof(devpack->name));
		devpack->pdev.name = "xilinx-axidma";

		devpack->io_ptr = NULL;

		devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
		devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
		devpack->dma_chan_cfg[0].irq = chan0_irq;
		devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
		devpack->dma_chan_cfg[0].type =
			(chan0_dir == XLNK_DMA_FROM_DEVICE) ?
				"axi-dma-s2mm-channel" :
				"axi-dma-mm2s-channel";

		if (chan_num > 1) {
			devpack->dma_chan_cfg[1].include_dre =
				chan1_include_dre;
			devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
			devpack->dma_chan_cfg[1].irq = chan1_irq;
			devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
			devpack->dma_chan_cfg[1].type =
				(chan1_dir == XLNK_DMA_FROM_DEVICE) ?
					"axi-dma-s2mm-channel" :
					"axi-dma-mm2s-channel";
		}

		devpack->dma_dev_cfg.name = devpack->name;
		devpack->dma_dev_cfg.type = "axi-dma";
		devpack->dma_dev_cfg.include_sg = 1;
		devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
		devpack->dma_dev_cfg.channel_count = chan_num;
		devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];

		devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;

		devpack->pdev.dev.dma_mask = &dma_mask;
		devpack->pdev.dev.coherent_dma_mask = dma_mask;

		devpack->res[0].start = base;
		devpack->res[0].end = base + size - 1;
		devpack->res[0].flags = IORESOURCE_MEM;

		devpack->pdev.resource = devpack->res;
		devpack->pdev.num_resources = 1;
		status = platform_device_register(&devpack->pdev);
		if (status) {
			xlnk_devpacks_delete(devpack);
			kfree(devpack);
			*handle = 0;
		} else {
			*handle = (xlnk_intptr_type)devpack;
		}
	}
	up(&xlnk_devpack_sem);

#endif
	return status;
}

static int xlnk_allocbuf_ioctl(struct file *filp,
			       unsigned int code,
			       unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_int_type id;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	id = xlnk_allocbuf(temp_args.allocbuf.len,
			   temp_args.allocbuf.cacheable);
	if (id <= 0)
		return -ENOMEM;

	temp_args.allocbuf.id = id;
	temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
}

static int xlnk_freebuf(int id)
{
	void *alloc_point;
	dma_addr_t p_addr;
	size_t buf_len;
	int cacheable;
	unsigned long attrs;

	if (id <= 0 || id >= xlnk_bufpool_size)
		return -EINVAL;

	spin_lock(&xlnk_buf_lock);
	/* Re-check under the lock so concurrent frees can't race. */
	if (!xlnk_bufpool[id]) {
		spin_unlock(&xlnk_buf_lock);
		return -EINVAL;
	}
	alloc_point = xlnk_bufpool_alloc_point[id];
	p_addr = xlnk_phyaddr[id];
	buf_len = xlnk_buflen[id];
	xlnk_bufpool[id] = NULL;
	xlnk_bufpool_alloc_point[id] = NULL;
	xlnk_phyaddr[id] = (dma_addr_t)0;
	xlnk_buflen[id] = 0;
	cacheable = xlnk_bufcacheable[id];
	xlnk_bufcacheable[id] = 0;
	spin_unlock(&xlnk_buf_lock);

	attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;

	dma_free_attrs(xlnk_dev,
		       buf_len,
		       alloc_point,
		       p_addr,
		       attrs);

	return 0;
}

static void xlnk_free_all_buf(void)
{
	int i;

	for (i = 1; i < xlnk_bufpool_size; i++)
		xlnk_freebuf(i);
}

static int xlnk_freebuf_ioctl(struct file *filp,
			      unsigned int code,
			      unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int id;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	id = temp_args.freebuf.id;
	return xlnk_freebuf(id);
}

static int xlnk_adddmabuf_ioctl(struct file *filp,
				unsigned int code,
				unsigned long args)
{
	union xlnk_args temp_args;
	struct xlnk_dmabuf_reg *db;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	spin_lock(&xlnk_buf_lock);
	list_for_each_entry(db, &xlnk_dmabuf_list, list) {
		if (db->user_vaddr == temp_args.dmabuf.user_addr) {
			pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
			       (unsigned long long)temp_args.dmabuf.user_addr);
			spin_unlock(&xlnk_buf_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&xlnk_buf_lock);

	db = kzalloc(sizeof(*db), GFP_KERNEL);
	if (!db)
		return -ENOMEM;

	db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
	db->user_vaddr = temp_args.dmabuf.user_addr;
	db->dbuf = dma_buf_get(db->dmabuf_fd);
	if (IS_ERR_OR_NULL(db->dbuf)) {
		pr_err("Failed DMA-BUF get\n");
		kfree(db);
		return -EINVAL;
	}
	db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
	if (IS_ERR(db->dbuf_attach)) {
		dma_buf_put(db->dbuf);
		pr_err("Failed DMA-BUF attach\n");
		kfree(db);
		return -EINVAL;
	}

	db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
						   DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(db->dbuf_sg_table)) {
		pr_err("Failed DMA-BUF map_attachment\n");
		dma_buf_detach(db->dbuf, db->dbuf_attach);
		dma_buf_put(db->dbuf);
		kfree(db);
		return -EINVAL;
	}

	spin_lock(&xlnk_buf_lock);
	INIT_LIST_HEAD(&db->list);
	list_add_tail(&db->list, &xlnk_dmabuf_list);
	spin_unlock(&xlnk_buf_lock);

	return 0;
}

static int xlnk_cleardmabuf_ioctl(struct file *filp,
				  unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	struct xlnk_dmabuf_reg *dp, *dp_temp;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	spin_lock(&xlnk_buf_lock);
	list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
		if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
			dma_buf_unmap_attachment(dp->dbuf_attach,
						 dp->dbuf_sg_table,
						 DMA_BIDIRECTIONAL);
			dma_buf_detach(dp->dbuf, dp->dbuf_attach);
			dma_buf_put(dp->dbuf);
			list_del(&dp->list);
			spin_unlock(&xlnk_buf_lock);
			kfree(dp);
			return 0;
		}
	}
	spin_unlock(&xlnk_buf_lock);
	pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
	       (unsigned long long)temp_args.dmabuf.user_addr);

	return -EINVAL;
}

static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	int status;
	struct xdma_chan *chan;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	if (!temp_args.dmarequest.name[0])
		return 0;

	down(&xlnk_devpack_sem);
	chan = xdma_request_channel(temp_args.dmarequest.name);
	up(&xlnk_devpack_sem);
	if (!chan)
		return -ENOMEM;
	temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
	temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
	temp_args.dmarequest.bd_space_size = chan->bd_chain_size;

	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
#else
	return -ENOSYS;
#endif
}

static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
				unsigned long args)
{
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	struct xdma_head *dmahead;
	struct xlnk_dmabuf_reg *dp, *cp = NULL;
	int buf_id;
	void *kaddr = NULL;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	if (!temp_args.dmasubmit.dmachan)
		return -ENODEV;

	spin_lock(&xlnk_buf_lock);
	buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
	if (buf_id) {
		xlnk_intptr_type addr_delta =
			temp_args.dmasubmit.buf -
			xlnk_phyaddr[buf_id];

		kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
	} else {
		list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
			if (dp->user_vaddr == temp_args.dmasubmit.buf) {
				cp = dp;
				break;
			}
		}
	}
	spin_unlock(&xlnk_buf_lock);

	status = xdma_submit((struct xdma_chan *)
			     (temp_args.dmasubmit.dmachan),
			     temp_args.dmasubmit.buf,
			     kaddr,
			     temp_args.dmasubmit.len,
			     temp_args.dmasubmit.nappwords_i,
			     temp_args.dmasubmit.appwords_i,
			     temp_args.dmasubmit.nappwords_o,
			     temp_args.dmasubmit.flag,
			     &dmahead,
			     cp);
	if (status)
		return status;

	/* dmahead is only valid after a successful submit. */
	temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
	temp_args.dmasubmit.last_bd_index =
		(xlnk_intptr_type)dmahead->last_bd_index;

	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;

	return 0;
#else
	return -ENOMEM;
#endif
}

static int xlnk_dmawait_ioctl(struct file *filp,
			      unsigned int code,
			      unsigned long args)
{
	int status = -ENOSYS;
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;
	struct xdma_head *dmahead;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
	status = xdma_wait(dmahead,
			   dmahead->userflag,
			   &temp_args.dmawait.flags);
	if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
		if (temp_args.dmawait.nappwords) {
			memcpy(temp_args.dmawait.appwords,
			       dmahead->appwords_o,
			       dmahead->nappwords_o * sizeof(u32));
		}
		kfree(dmahead);
	}
	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(union xlnk_args)))
		return -EFAULT;
#endif

	return status;
}

static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
				 unsigned long args)
{
	int status = -ENOSYS;
#ifdef CONFIG_XILINX_DMA_APF
	union xlnk_args temp_args;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;
	down(&xlnk_devpack_sem);
	xdma_release_channel((struct xdma_chan *)
			     (temp_args.dmarelease.dmachan));
	up(&xlnk_devpack_sem);
#endif

	return status;
}

static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_intptr_type handle;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	status = xlnk_devregister(temp_args.devregister.name,
				  temp_args.devregister.id,
				  temp_args.devregister.base,
				  temp_args.devregister.size,
				  temp_args.devregister.irqs,
				  &handle);

	return status;
}

static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
				  unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	xlnk_intptr_type handle;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	status = xlnk_dmaregister(temp_args.dmaregister.name,
				  temp_args.dmaregister.id,
				  temp_args.dmaregister.base,
				  temp_args.dmaregister.size,
				  temp_args.dmaregister.chan_num,
				  temp_args.dmaregister.chan0_dir,
				  temp_args.dmaregister.chan0_irq,
				  temp_args.dmaregister.chan0_poll_mode,
				  temp_args.dmaregister.chan0_include_dre,
				  temp_args.dmaregister.chan0_data_width,
				  temp_args.dmaregister.chan1_dir,
				  temp_args.dmaregister.chan1_irq,
				  temp_args.dmaregister.chan1_poll_mode,
				  temp_args.dmaregister.chan1_include_dre,
				  temp_args.dmaregister.chan1_data_width,
				  &handle);

	return status;
}

static int xlnk_devunregister_ioctl(struct file *filp,
				    unsigned int code,
				    unsigned long args)
{
	union xlnk_args temp_args;
	int status;

	status = copy_from_user(&temp_args, (void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	xlnk_devpacks_free(temp_args.devunregister.base);

	return 0;
}

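/*
 * Accelerator interrupt handshake: the ISR masks the line and signals
 * the completion; xlnk_irq_wait_ioctl() consumes the completion and
 * re-enables the line on the next wait.
 */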
static irqreturn_t xlnk_accel_isr(int irq, void *arg)
{
	struct xlnk_irq_control *irq_control = (struct xlnk_irq_control *)arg;

	disable_irq_nosync(irq);
	complete(&irq_control->cmp);

	return IRQ_HANDLED;
}

static int xlnk_irq_register_ioctl(struct file *filp, unsigned int code,
				   unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int i;
	struct xlnk_irq_control *ctrl;
	int irq_id = -1;
	int irq_entry_new = 0;

	status = copy_from_user(&temp_args,
				(void __user *)args,
				sizeof(temp_args.irqregister));
	if (status)
		return -EFAULT;

	if (temp_args.irqregister.type !=
	    (XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH)) {
		dev_err(xlnk_dev, "Unsupported interrupt type %x\n",
			temp_args.irqregister.type);
		return -EINVAL;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->irq = xlate_irq(temp_args.irqregister.irq);
	ctrl->enabled = 0;
	init_completion(&ctrl->cmp);

	spin_lock(&xlnk_irq_lock);
	/* Reuse an existing registration of the same line, if any. */
	for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
		if (xlnk_irq_set[i] && xlnk_irq_set[i]->irq == ctrl->irq) {
			irq_id = i;
			break;
		}
	}
	/* Otherwise claim the first free slot. */
	if (irq_id == -1) {
		for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
			if (!xlnk_irq_set[i]) {
				irq_entry_new = 1;
				irq_id = i;
				xlnk_irq_set[i] = ctrl;
				break;
			}
		}
	}
	spin_unlock(&xlnk_irq_lock);

	if (irq_id == -1) {
		kfree(ctrl);
		return -ENOMEM;
	}

	if (!irq_entry_new) {
		kfree(ctrl);
	} else {
		status = request_irq(ctrl->irq,
				     xlnk_accel_isr,
				     IRQF_SHARED,
				     "xlnk",
				     ctrl);
		if (status) {
			spin_lock(&xlnk_irq_lock);
			xlnk_irq_set[irq_id] = NULL;
			spin_unlock(&xlnk_irq_lock);
			kfree(ctrl);
			return -EINVAL;
		}
		disable_irq_nosync(ctrl->irq);
	}

	temp_args.irqregister.irq_id = irq_id;

	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(temp_args.irqregister)))
		return -EFAULT;

	return 0;
}

static int xlnk_irq_unregister_ioctl(struct file *filp, unsigned int code,
				     unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int irq_id;
	struct xlnk_irq_control *ctrl;

	status = copy_from_user(&temp_args,
				(void __user *)args,
				sizeof(union xlnk_args));
	if (status)
		return -EFAULT;

	irq_id = temp_args.irqunregister.irq_id;
	if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
		return -EINVAL;

	spin_lock(&xlnk_irq_lock);
	ctrl = xlnk_irq_set[irq_id];
	if (!ctrl) {
		spin_unlock(&xlnk_irq_lock);
		return -EINVAL;
	}
	xlnk_irq_set[irq_id] = NULL;
	spin_unlock(&xlnk_irq_lock);

	if (ctrl->enabled) {
		disable_irq_nosync(ctrl->irq);
		complete(&ctrl->cmp);
	}
	free_irq(ctrl->irq, ctrl);
	kfree(ctrl);

	return 0;
}

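/*
 * Wait (or poll) for an accelerator interrupt.  Enabling the line is
 * deferred until the first wait so that no interrupts are taken before
 * userspace is ready to consume them.
 */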
static int xlnk_irq_wait_ioctl(struct file *filp, unsigned int code,
			       unsigned long args)
{
	union xlnk_args temp_args;
	int status;
	int irq_id;
	struct xlnk_irq_control *ctrl;

	status = copy_from_user(&temp_args,
				(void __user *)args,
				sizeof(temp_args.irqwait));
	if (status)
		return -EFAULT;

	irq_id = temp_args.irqwait.irq_id;
	if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
		return -EINVAL;

	ctrl = xlnk_irq_set[irq_id];
	if (!ctrl)
		return -EINVAL;

	if (!ctrl->enabled) {
		ctrl->enabled = 1;
		enable_irq(ctrl->irq);
	}

	if (temp_args.irqwait.polling) {
		if (!try_wait_for_completion(&ctrl->cmp))
			temp_args.irqwait.success = 0;
		else
			temp_args.irqwait.success = 1;
	} else {
		wait_for_completion(&ctrl->cmp);
		temp_args.irqwait.success = 1;
	}

	if (temp_args.irqwait.success) {
		reinit_completion(&ctrl->cmp);
		ctrl->enabled = 0;
	}

	if (copy_to_user((void __user *)args,
			 &temp_args,
			 sizeof(temp_args.irqwait)))
		return -EFAULT;

	return 0;
}

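/*
 * Explicit cache maintenance for sds_alloc buffers: action 0 flushes
 * (clean to memory, before the device reads), action 1 additionally
 * invalidates (before the CPU reads back device-written data).
 */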
static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
				   unsigned long args)
{
	union xlnk_args temp_args;
	int status, size;
	void *kaddr;
	xlnk_intptr_type paddr;
	int buf_id;

	status = copy_from_user(&temp_args,
				(void __user *)args,
				sizeof(union xlnk_args));
	if (status) {
		dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
			status);
		return -EFAULT;
	}

	if (!(temp_args.cachecontrol.action == 0 ||
	      temp_args.cachecontrol.action == 1)) {
		dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
			temp_args.cachecontrol.action);
		return -EINVAL;
	}

	size = temp_args.cachecontrol.size;
	paddr = temp_args.cachecontrol.phys_addr;

	spin_lock(&xlnk_buf_lock);
	buf_id = xlnk_buf_find_by_phys_addr(paddr);
	if (buf_id == 0) {
		spin_unlock(&xlnk_buf_lock);
		pr_err("Illegal cachecontrol on non-sds_alloc memory\n");
		return -EINVAL;
	}
	kaddr = xlnk_bufpool[buf_id];
	spin_unlock(&xlnk_buf_lock);

#if XLNK_SYS_BIT_WIDTH == 32
	__cpuc_flush_dcache_area(kaddr, size);
	outer_flush_range(paddr, paddr + size);
	if (temp_args.cachecontrol.action == 1)
		outer_inv_range(paddr, paddr + size);
#else
	if (temp_args.cachecontrol.action == 1)
		__dma_map_area(kaddr, size, DMA_FROM_DEVICE);
	else
		__dma_map_area(kaddr, size, DMA_TO_DEVICE);
#endif
	return 0;
}

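/*
 * XLNK_IOCMEMOP: map or unmap a userspace buffer for device DMA.
 * ACQUIRE maps the buffer (pool buffers through the streaming DMA API,
 * DMA-BUFs through their attachment) and hands the physical address
 * plus an opaque token back to userspace; RELEASE uses the token to
 * unmap.  Coherent or non-cacheable buffers skip the CPU cache sync.
 */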
static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
{
	union xlnk_args args;
	xlnk_intptr_type p_addr = 0;
	int status = 0;
	int buf_id;
	struct xlnk_dmabuf_reg *cp = NULL;
	int cacheable = 1;
	enum dma_data_direction dmadir;
	xlnk_intptr_type page_id;
	unsigned int page_offset;
	struct scatterlist sg;
	unsigned long attrs = 0;

	status = copy_from_user(&args,
				(void __user *)arg_addr,
				sizeof(union xlnk_args));
	if (status) {
		pr_err("Error in copy_from_user. status = %d\n", status);
		return -EFAULT;
	}

	if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
	    !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
		pr_err("memop lacks acquire or release flag\n");
		return -EINVAL;
	}

	if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
	    args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
		pr_err("memop has both acquire and release defined\n");
		return -EINVAL;
	}

	spin_lock(&xlnk_buf_lock);
	buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
					    current->pid);
	if (buf_id > 0) {
		cacheable = xlnk_bufcacheable[buf_id];
		p_addr = xlnk_phyaddr[buf_id] +
			(args.memop.virt_addr - xlnk_userbuf[buf_id]);
	} else {
		struct xlnk_dmabuf_reg *dp;

		list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
			if (dp->user_vaddr == args.memop.virt_addr) {
				cp = dp;
				break;
			}
		}
	}
	spin_unlock(&xlnk_buf_lock);

	if (buf_id <= 0 && !cp) {
		pr_err("Error, buffer not found\n");
		return -EINVAL;
	}

	dmadir = (enum dma_data_direction)args.memop.dir;

	if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	if (buf_id > 0) {
		page_id = p_addr >> PAGE_SHIFT;
		page_offset = p_addr - (page_id << PAGE_SHIFT);
		sg_init_table(&sg, 1);
		sg_set_page(&sg,
			    pfn_to_page(page_id),
			    args.memop.size,
			    page_offset);
		sg_dma_len(&sg) = args.memop.size;
	}

	if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
		if (buf_id > 0) {
			status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
							       &sg,
							       1,
							       dmadir,
							       attrs);
			if (!status) {
				pr_err("Failed to map address\n");
				return -EINVAL;
			}
			args.memop.phys_addr = (xlnk_intptr_type)
				sg_dma_address(&sg);
			args.memop.token = (xlnk_intptr_type)
				sg_dma_address(&sg);
			status = copy_to_user((void __user *)arg_addr,
					      &args,
					      sizeof(union xlnk_args));
			if (status) {
				pr_err("Error in copy_to_user. status = %d\n",
				       status);
				status = -EFAULT;
			}
		} else {
			if (cp->dbuf_sg_table->nents != 1) {
				pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs.  DMABUF is not physically contiguous\n");
				return -EINVAL;
			}
			args.memop.phys_addr = (xlnk_intptr_type)
				sg_dma_address(cp->dbuf_sg_table->sgl);
			args.memop.token = 0;
			status = copy_to_user((void __user *)arg_addr,
					      &args,
					      sizeof(union xlnk_args));
			if (status) {
				pr_err("Error in copy_to_user. status = %d\n",
				       status);
				status = -EFAULT;
			}
		}
	} else {
		if (buf_id > 0) {
			sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
			get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
							&sg,
							1,
							dmadir,
							attrs);
		}
	}

	return status;
}

static long xlnk_ioctl(struct file *filp,
		       unsigned int code,
		       unsigned long args)
{
	if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(code) > XLNK_IOC_MAXNR)
		return -ENOTTY;

	switch (code) {
	case XLNK_IOCALLOCBUF:
		return xlnk_allocbuf_ioctl(filp, code, args);
	case XLNK_IOCFREEBUF:
		return xlnk_freebuf_ioctl(filp, code, args);
	case XLNK_IOCADDDMABUF:
		return xlnk_adddmabuf_ioctl(filp, code, args);
	case XLNK_IOCCLEARDMABUF:
		return xlnk_cleardmabuf_ioctl(filp, code, args);
	case XLNK_IOCDMAREQUEST:
		return xlnk_dmarequest_ioctl(filp, code, args);
	case XLNK_IOCDMASUBMIT:
		return xlnk_dmasubmit_ioctl(filp, code, args);
	case XLNK_IOCDMAWAIT:
		return xlnk_dmawait_ioctl(filp, code, args);
	case XLNK_IOCDMARELEASE:
		return xlnk_dmarelease_ioctl(filp, code, args);
	case XLNK_IOCDEVREGISTER:
		return xlnk_devregister_ioctl(filp, code, args);
	case XLNK_IOCDMAREGISTER:
		return xlnk_dmaregister_ioctl(filp, code, args);
	case XLNK_IOCDEVUNREGISTER:
		return xlnk_devunregister_ioctl(filp, code, args);
	case XLNK_IOCCACHECTRL:
		return xlnk_cachecontrol_ioctl(filp, code, args);
	case XLNK_IOCIRQREGISTER:
		return xlnk_irq_register_ioctl(filp, code, args);
	case XLNK_IOCIRQUNREGISTER:
		return xlnk_irq_unregister_ioctl(filp, code, args);
	case XLNK_IOCIRQWAIT:
		return xlnk_irq_wait_ioctl(filp, code, args);
	case XLNK_IOCSHUTDOWN:
		return xlnk_shutdown(args);
	case XLNK_IOCRECRES:
		return xlnk_recover_resource(args);
	case XLNK_IOCMEMOP:
		return xlnk_memop_ioctl(filp, args);
	default:
		return -EINVAL;
	}
}

static const struct vm_operations_struct xlnk_vm_ops = {
	.open = xlnk_vma_open,
	.close = xlnk_vma_close,
};

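/*
 * The buffer id is carried in the upper bits of the mmap offset:
 * userspace encodes it as (id << 16), which after the kernel's
 * PAGE_SHIFT conversion is recovered as vm_pgoff >> (16 - PAGE_SHIFT).
 * Offset 0 maps the built-in 8 KiB xlnk_dev_buf.
 */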
static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int bufid;
	int status;

	bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);

	if (bufid < 0 || bufid >= xlnk_bufpool_size)
		return -EINVAL;

	if (bufid == 0) {
		unsigned long paddr = virt_to_phys(xlnk_dev_buf);

		status = remap_pfn_range(vma,
					 vma->vm_start,
					 paddr >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	} else {
		if (xlnk_bufcacheable[bufid] == 0)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		status = remap_pfn_range(vma, vma->vm_start,
					 xlnk_phyaddr[bufid]
					 >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
		xlnk_userbuf[bufid] = vma->vm_start;
		xlnk_buf_process[bufid] = current->pid;
	}
	if (status) {
		pr_err("%s failed with code %d\n", __func__, status);
		return status;
	}

	xlnk_vma_open(vma);
	vma->vm_ops = &xlnk_vm_ops;
	vma->vm_private_data = xlnk_bufpool[bufid];

	return 0;
}

static void xlnk_vma_open(struct vm_area_struct *vma)
{
	xlnk_dev_vmas++;
}

static void xlnk_vma_close(struct vm_area_struct *vma)
{
	xlnk_dev_vmas--;
}

static int xlnk_shutdown(unsigned long buf)
{
	return 0;
}

static int xlnk_recover_resource(unsigned long buf)
{
	xlnk_free_all_buf();
#ifdef CONFIG_XILINX_DMA_APF
	xdma_release_all_channels();
#endif
	return 0;
}

module_platform_driver(xlnk_driver);

MODULE_DESCRIPTION("Xilinx APF driver");
MODULE_LICENSE("GPL");