/*
 * Xilinx AXI VDMA client driver
 *
 * Copyright (C) Xilinx, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/amba/xilinx_dma.h>
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include "xvdma.h"

static struct xvdma_dev *xvdma_dev_info[MAX_DEVICES + 1];
static u64 dma_mask = 0xFFFFFFFFUL;
static struct chan_buf chan_buf[MAX_FRAMES];
static u32 num_devices;
static struct completion cmp;

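/*
 * Look up a scanned device by ID and copy its bookkeeping structure to
 * the caller. Entries are matched on the device_id assigned during the
 * channel scan at probe time.
 */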
static void xvdma_get_dev_info(u32 device_id, struct xvdma_dev *dev)
{
	int i;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (xvdma_dev_info[i] &&
		    xvdma_dev_info[i]->device_id == device_id) {
			memcpy(dev, xvdma_dev_info[i],
			       sizeof(struct xvdma_dev));
			return;
		}
	}
}

static int xvdma_open(struct inode *ip, struct file *filp)
{
	return 0;
}

static int xvdma_release(struct inode *ip, struct file *filp)
{
	return 0;
}

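/*
 * Character-device ioctl entry point. Each command copies a small
 * control structure from user space and drives the matching dmaengine
 * operation. A minimal user-space sketch, assuming a /dev/xvdma node
 * has been created for XVDMA_MAJOR/XVDMA_MINOR (the node name is
 * illustrative; this driver does not create it):
 *
 *	int fd = open("/dev/xvdma", O_RDWR);
 *	unsigned int devices = 0;
 *	ioctl(fd, XVDMA_GET_NUM_DEVICES, &devices);
 *
 *	struct xvdma_dev dev = { .device_id = 0 };
 *	ioctl(fd, XVDMA_GET_DEV_INFO, &dev);
 */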
static long xvdma_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct xvdma_dev xvdma_dev;
	struct xvdma_chan_cfg chan_cfg;
	struct xvdma_buf_info buf_info;
	struct xvdma_transfer tx_info;
	u32 devices, chan;

	switch (cmd) {
	case XVDMA_GET_NUM_DEVICES:
	{
		devices = num_devices;
		if (copy_to_user((u32 __user *)arg,
				 &devices, sizeof(u32)))
			return -EFAULT;
		break;
	}
	case XVDMA_GET_DEV_INFO:
	{
		if (copy_from_user((void *)&xvdma_dev,
				   (const void __user *)arg,
				   sizeof(struct xvdma_dev)))
			return -EFAULT;

		xvdma_get_dev_info(xvdma_dev.device_id, &xvdma_dev);

		if (copy_to_user((struct xvdma_dev __user *)arg,
				 &xvdma_dev, sizeof(struct xvdma_dev)))
			return -EFAULT;
		break;
	}
	case XVDMA_DEVICE_CONTROL:
	{
		if (copy_from_user((void *)&chan_cfg,
				   (const void __user *)arg,
				   sizeof(struct xvdma_chan_cfg)))
			return -EFAULT;

		xvdma_device_control(&chan_cfg);
		break;
	}
	case XVDMA_PREP_BUF:
	{
		if (copy_from_user((void *)&buf_info,
				   (const void __user *)arg,
				   sizeof(struct xvdma_buf_info)))
			return -EFAULT;

		xvdma_prep_slave_sg(&buf_info);
		break;
	}
	case XVDMA_START_TRANSFER:
	{
		if (copy_from_user((void *)&tx_info,
				   (const void __user *)arg,
				   sizeof(struct xvdma_transfer)))
			return -EFAULT;

		xvdma_start_transfer(&tx_info);
		break;
	}
	case XVDMA_STOP_TRANSFER:
	{
		if (copy_from_user((void *)&chan,
				   (const void __user *)arg,
				   sizeof(u32)))
			return -EFAULT;

		xvdma_stop_transfer((struct dma_chan *)chan);
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

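/*
 * dma_request_channel() filter. The Xilinx DMA driver publishes a
 * match word through chan->private; a channel is accepted when it
 * equals the caller's match word (direction | IP type | device ID)
 * built in xvdma_scan_channels().
 */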
static bool xvdma_filter(struct dma_chan *chan, void *param)
{
	return *((int *)chan->private) == *(int *)param;
}

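/* Completion callback: signal the waiter in xvdma_start_transfer(). */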
static void vdma_sync_callback(void *completion)
{
	complete(completion);
}

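/*
 * Terminate all pending transfers on a channel via the dmaengine
 * DMA_TERMINATE_ALL control (this file uses the legacy device_control
 * interface throughout).
 */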
void xvdma_stop_transfer(struct dma_chan *chan)
{
	struct dma_device *chan_dev;

	if (chan) {
		chan_dev = chan->device;
		chan_dev->device_control(chan, DMA_TERMINATE_ALL,
					 (unsigned long)NULL);
	}
}

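/*
 * Kick off previously submitted descriptors. When tx_info->wait is
 * set, block (up to 3 s) until vdma_sync_callback() completes the
 * shared completion.
 */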
void xvdma_start_transfer(struct xvdma_transfer *tx_info)
{
	unsigned long tmo = msecs_to_jiffies(3000);

	init_completion(&cmp);
	if (tx_info->chan)
		dma_async_issue_pending((struct dma_chan *)tx_info->chan);

	if (tx_info->wait) {
		tmo = wait_for_completion_timeout(&cmp, tmo);
		if (tmo == 0)
			pr_err("Timeout has occurred...\n");
	}
}

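/*
 * Build a scatterlist of frame buffers and submit it as a slave
 * transfer. Buffers either sit at a fixed physical base address
 * (fixed_buffer), are freshly kmalloc'ed and DMA-mapped here, or are
 * reused from an earlier setup on the same device (shared_buffer).
 */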
void xvdma_prep_slave_sg(struct xvdma_buf_info *buf_info)
{
	struct dma_chan *chan;
	struct dma_device *chan_dev;
	struct dma_async_tx_descriptor *chan_desc;
	struct scatterlist chansg[MAX_FRAMES];
	dma_addr_t dma_srcs[MAX_FRAMES];
	u8 **buf = NULL;
	int buf_size;
	u32 flags = 0;
	int i;
	u32 device_id;
	u32 frm_cnt = buf_info->frm_cnt;

	buf_size = buf_info->buf_size;
	chan = (struct dma_chan *)buf_info->chan;
	device_id = buf_info->device_id;

	if (chan) {
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

		if (buf_info->fixed_buffer) {
			chan_dev = chan->device;
			sg_init_table(chansg, frm_cnt);
			for (i = 0; i < frm_cnt; i++) {
				if (!buf_info->shared_buffer) {
					dma_srcs[i] =
					    buf_info->addr_base + i * buf_size;
					chan_buf[device_id].dma_addr[i] =
					    dma_srcs[i];
				}
				sg_dma_address(&chansg[i]) =
				    chan_buf[device_id].dma_addr[i];
				sg_dma_len(&chansg[i]) = buf_size;
			}
		} else {
			if (!buf_info->shared_buffer) {
				buf = kcalloc(frm_cnt + 1, sizeof(u8 *),
					      GFP_KERNEL);
				if (!buf) {
					pr_err("Buf failed\n");
					return;
				}

				for (i = 0; i < frm_cnt; i++) {
					buf[i] = kmalloc(buf_size, GFP_KERNEL);
					if (!buf[i]) {
						pr_err("Buf[%d] failed\n", i);
						while (i--)
							kfree(buf[i]);
						kfree(buf);
						return;
					}
				}
				buf[i] = NULL;
			}

			chan_dev = chan->device;
			sg_init_table(chansg, frm_cnt);
			for (i = 0; i < frm_cnt; i++) {
				if (!buf_info->shared_buffer) {
					dma_srcs[i] = dma_map_single(
					    chan_dev->dev, buf[i], buf_size,
					    buf_info->mem_type);
					chan_buf[device_id].dma_addr[i] =
					    dma_srcs[i];
				}
				sg_dma_address(&chansg[i]) =
				    chan_buf[device_id].dma_addr[i];
				sg_dma_len(&chansg[i]) = buf_size;
			}
		}
		chan_desc = chan_dev->device_prep_slave_sg(chan, chansg,
				frm_cnt, buf_info->direction, flags, NULL);
		if (!chan_desc) {
			pr_err("Failed to prepare slave sg descriptor\n");
			return;
		}
		if (buf_info->callback) {
			chan_desc->callback = vdma_sync_callback;
			chan_desc->callback_param = &cmp;
		}
		chan_desc->tx_submit(chan_desc);
	}
}

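/* Apply a DMA_SLAVE_CONFIG (VDMA channel configuration) to a channel. */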
void xvdma_device_control(struct xvdma_chan_cfg *chan_cfg)
{
	struct dma_chan *chan;
	struct dma_device *chan_dev;

	chan = (struct dma_chan *)chan_cfg->chan;

	if (chan) {
		chan_dev = chan->device;
		chan_dev->device_control(chan, DMA_SLAVE_CONFIG,
					 (unsigned long)&chan_cfg->config);
	}
}

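/*
 * Record a scanned tx/rx channel pair. The static counter doubles as
 * the device ID handed back to user space.
 */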
static void xvdma_add_dev_info(struct dma_chan *tx_chan,
			       struct dma_chan *rx_chan)
{
	static u32 i;

	if (i >= MAX_DEVICES)
		return;

	xvdma_dev_info[i] = kzalloc(sizeof(struct xvdma_dev), GFP_KERNEL);
	if (!xvdma_dev_info[i])
		return;

	xvdma_dev_info[i]->tx_chan = (u32)tx_chan;
	xvdma_dev_info[i]->rx_chan = (u32)rx_chan;
	xvdma_dev_info[i]->device_id = i;
	num_devices++;
	i++;
}

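/*
 * Probe the dmaengine for VDMA channels. For each candidate device ID
 * a match word (direction | IP type | device ID) is built for the tx
 * and rx sides; scanning stops at the first ID with no channel in
 * either direction.
 */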
static void xvdma_scan_channels(void)
{
	dma_cap_mask_t mask;
	u32 match_tx, match_rx;
	struct dma_chan *tx_chan, *rx_chan;
	u32 device_id = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	for (;;) {
		match_tx = (DMA_TO_DEVICE & 0xFF) | XILINX_DMA_IP_VDMA |
			   (device_id << XVDMA_DEVICE_ID_SHIFT);
		tx_chan = dma_request_channel(mask, xvdma_filter,
					      (void *)&match_tx);
		match_rx = (DMA_FROM_DEVICE & 0xFF) | XILINX_DMA_IP_VDMA |
			   (device_id << XVDMA_DEVICE_ID_SHIFT);
		rx_chan = dma_request_channel(mask, xvdma_filter,
					      (void *)&match_rx);

		if (!tx_chan && !rx_chan)
			break;

		xvdma_add_dev_info(tx_chan, rx_chan);
		device_id++;
	}
}

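/* Hand every channel taken during the scan back to the dmaengine. */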
static void xvdma_release_channels(void)
{
	int i;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (!xvdma_dev_info[i])
			continue;
		if (xvdma_dev_info[i]->tx_chan)
			dma_release_channel((struct dma_chan *)
					    xvdma_dev_info[i]->tx_chan);
		if (xvdma_dev_info[i]->rx_chan)
			dma_release_channel((struct dma_chan *)
					    xvdma_dev_info[i]->rx_chan);
		kfree(xvdma_dev_info[i]);
		xvdma_dev_info[i] = NULL;
	}
}

static const struct file_operations xvdma_fops = {
	.owner = THIS_MODULE,
	.open = xvdma_open,
	.unlocked_ioctl = xvdma_ioctl,
	.release = xvdma_release,
};

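/*
 * Register the character device and scan for VDMA channels. The
 * major/minor pair comes from xvdma.h; no device node is created here.
 */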
static int xvdma_probe(struct platform_device *pdev)
{
	dev_t devt;
	struct xvdma_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int retval;

	devt = MKDEV(XVDMA_MAJOR, XVDMA_MINOR);

	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct xvdma_drvdata),
			       GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	dev_set_drvdata(dev, drvdata);

	drvdata->dev = dev;
	drvdata->devt = devt;

	cdev_init(&drvdata->cdev, &xvdma_fops);
	drvdata->cdev.owner = THIS_MODULE;
	retval = cdev_add(&drvdata->cdev, devt, 1);
	if (retval) {
		dev_err(dev, "cdev_add() failed\n");
		return retval;
	}

	xvdma_scan_channels();
	dev_info(dev, "Xilinx VDMA probe successful\n");
	dev_info(dev, "Devices Scanned %d\n", num_devices);
	return 0;
}

static int xvdma_remove(struct platform_device *op)
{
	struct xvdma_drvdata *drvdata;
	struct device *dev = &op->dev;

	drvdata = dev_get_drvdata(dev);
	if (!drvdata)
		return 0;

	xvdma_release_channels();
	cdev_del(&drvdata->cdev);
	return 0;
}

static struct platform_driver xvdma_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
	.probe = xvdma_probe,
	.remove = xvdma_remove,
	.suspend = XVDMA_SUSPEND,
	.resume = XVDMA_RESUME,
};

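/*
 * This client has no device-tree node of its own: a static platform
 * device is registered at init time so the probe above always runs.
 */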
static struct platform_device xvdma_device = {
	.name = "xvdma",
	.id = 0,
	.dev = {
		.platform_data = NULL,
		.dma_mask = &dma_mask,
		.coherent_dma_mask = 0xFFFFFFFF,
	},
	.resource = NULL,
	.num_resources = 0,
};

static int __init xvdma_init(void)
{
	int ret;

	ret = platform_device_register(&xvdma_device);
	if (ret)
		return ret;

	ret = platform_driver_register(&xvdma_driver);
	if (ret)
		platform_device_unregister(&xvdma_device);

	return ret;
}

static void __exit xvdma_exit(void)
{
	platform_driver_unregister(&xvdma_driver);
	platform_device_unregister(&xvdma_device);
}

late_initcall(xvdma_init);
module_exit(xvdma_exit);

MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Xilinx AXI VDMA client driver");
MODULE_LICENSE("GPL v2");