/*
 * Xilinx AXI VDMA Engine test client driver
 *
 * Test client that exercises a pair of AXI VDMA channels (MEM_TO_DEV
 * mirrored by DEV_TO_MEM) in loopback and verifies the transferred
 * frames against a known fill pattern.
 *
 * NOTE(review): the original file header (copyright/license block) was
 * garbled during extraction; restore it from the upstream source tree.
 */
#include <linux/delay.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
30
/* Size in bytes of each test frame buffer; the test thread overrides
 * this at run time with hsize * vsize (see xilinx_vdmatest_slave_func). */
static unsigned int test_buf_size = 64;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

/* Number of iterations each test thread runs before terminating.
 * NOTE(review): the description says "default: infinite" but the
 * actual default is 1 — confirm which is intended. */
static unsigned int iterations = 1;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");
39
/*
 * Buffer fill patterns (inferred from init/verify/mismatch below):
 * bit 7 marks source bytes, bit 6 marks bytes expected to be copied,
 * bit 5 marks destination bytes expected to be overwritten, and the
 * low five bits carry a rolling index counter.
 */
#define PATTERN_SRC 0x80
#define PATTERN_DST 0x00
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f

/* Upper bound on frame buffers; sizes the dma_srcs/dma_dsts tables. */
#define MAX_NUM_FRAMES 32
/**
 * struct xilinx_vdmatest_slave_thread - state for one test thread
 * @node: entry on the owning channel's thread list
 * @task: kthread running xilinx_vdmatest_slave_func()
 * @tx_chan: MEM_TO_DEV channel under test
 * @rx_chan: DEV_TO_MEM channel under test
 * @srcs: NULL-terminated array of source frame buffers
 * @dsts: NULL-terminated array of destination frame buffers
 * @type: transaction type (set to DMA_SLAVE by the thread creator)
 * @done: set when the thread finishes; waiters sleep on thread_wait
 */
struct xilinx_vdmatest_slave_thread {
	struct list_head node;
	struct task_struct *task;
	struct dma_chan *tx_chan;
	struct dma_chan *rx_chan;
	u8 **srcs;
	u8 **dsts;
	enum dma_transaction_type type;
	bool done;
};
/**
 * struct xilinx_vdmatest_chan - per-channel bookkeeping
 * @node: entry on the global xilinx_vdmatest_channels list
 * @chan: the dmaengine channel this entry wraps
 * @threads: test threads attached to this channel
 */
struct xilinx_vdmatest_chan {
	struct list_head node;
	struct dma_chan *chan;
	struct list_head threads;
};
/* Woken by test threads when they finish (see thread->done). */
static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
/* All registered test channels (both TX and RX entries). */
static LIST_HEAD(xilinx_vdmatest_channels);
static unsigned int nr_channels;	/* entries on the list above */
static unsigned int frm_cnt;	/* xlnx,num-fstores from the device tree */
static dma_addr_t dma_srcs[MAX_NUM_FRAMES];	/* per-frame source DMA addrs */
static dma_addr_t dma_dsts[MAX_NUM_FRAMES];	/* per-frame dest DMA addrs */
/* Shared interleaved-transfer template.
 * NOTE(review): these globals assume a single thread pair — confirm
 * before ever starting more than one test thread. */
static struct dma_interleaved_template xt;
102static bool is_threaded_test_run(struct xilinx_vdmatest_chan *tx_dtc,
103 struct xilinx_vdmatest_chan *rx_dtc)
104{
105 struct xilinx_vdmatest_slave_thread *thread;
106 int ret = false;
107
108 list_for_each_entry(thread, &tx_dtc->threads, node) {
109 if (!thread->done)
110 ret = true;
111 }
112
113 list_for_each_entry(thread, &rx_dtc->threads, node) {
114 if (!thread->done)
115 ret = true;
116 }
117 return ret;
118}
119
120static void xilinx_vdmatest_init_srcs(u8 **bufs, unsigned int start,
121 unsigned int len)
122{
123 unsigned int i;
124 u8 *buf;
125
126 for (; (buf = *bufs); bufs++) {
127 for (i = 0; i < start; i++)
128 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
129 for (; i < start + len; i++)
130 buf[i] = PATTERN_SRC | PATTERN_COPY
131 | (~i & PATTERN_COUNT_MASK);
132 for (; i < test_buf_size; i++)
133 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
134 buf++;
135 }
136}
137
138static void xilinx_vdmatest_init_dsts(u8 **bufs, unsigned int start,
139 unsigned int len)
140{
141 unsigned int i;
142 u8 *buf;
143
144 for (; (buf = *bufs); bufs++) {
145 for (i = 0; i < start; i++)
146 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
147 for (; i < start + len; i++)
148 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
149 | (~i & PATTERN_COUNT_MASK);
150 for (; i < test_buf_size; i++)
151 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
152 }
153}
154
155static void xilinx_vdmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
156 unsigned int counter, bool is_srcbuf)
157{
158 u8 diff = actual ^ pattern;
159 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
160 const char *thread_name = current->comm;
161
162 if (is_srcbuf)
163 pr_warn(
164 "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
165 thread_name, index, expected, actual);
166 else if ((pattern & PATTERN_COPY)
167 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
168 pr_warn(
169 "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
170 thread_name, index, expected, actual);
171 else if (diff & PATTERN_SRC)
172 pr_warn(
173 "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
174 thread_name, index, expected, actual);
175 else
176 pr_warn(
177 "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
178 thread_name, index, expected, actual);
179}
180
181static unsigned int xilinx_vdmatest_verify(u8 **bufs, unsigned int start,
182 unsigned int end, unsigned int counter, u8 pattern,
183 bool is_srcbuf)
184{
185 unsigned int i, error_count = 0;
186 u8 actual, expected, *buf;
187 unsigned int counter_orig = counter;
188
189 for (; (buf = *bufs); bufs++) {
190 counter = counter_orig;
191 for (i = start; i < end; i++) {
192 actual = buf[i];
193 expected = pattern | (~counter & PATTERN_COUNT_MASK);
194 if (actual != expected) {
195 if (error_count < 32)
196 xilinx_vdmatest_mismatch(actual,
197 pattern, i,
198 counter, is_srcbuf);
199 error_count++;
200 }
201 counter++;
202 }
203 }
204
205 if (error_count > 32)
206 pr_warn("%s: %u errors suppressed\n",
207 current->comm, error_count - 32);
208
209 return error_count;
210}
211
/* Completion callback for the TX (MEM_TO_DEV) descriptor. */
static void xilinx_vdmatest_slave_tx_callback(void *completion)
{
	struct completion *cmp = completion;

	pr_debug("Got tx callback\n");
	complete(cmp);
}
217
/* Completion callback for the RX (DEV_TO_MEM) descriptor. */
static void xilinx_vdmatest_slave_rx_callback(void *completion)
{
	struct completion *cmp = completion;

	pr_debug("Got rx callback\n");
	complete(cmp);
}
228static int xilinx_vdmatest_slave_func(void *data)
229{
230 struct xilinx_vdmatest_slave_thread *thread = data;
231 struct dma_chan *tx_chan, *rx_chan;
232 const char *thread_name;
233 unsigned int len, error_count;
234 unsigned int failed_tests = 0, total_tests = 0;
235 dma_cookie_t tx_cookie = 0, rx_cookie = 0;
236 enum dma_status status;
237 enum dma_ctrl_flags flags;
238 int ret = -ENOMEM, i;
239 int hsize = 64, vsize = 32;
240 struct xilinx_vdma_config config;
241
242 thread_name = current->comm;
243
244
245 test_buf_size = hsize * vsize;
246
247
248
249
250 smp_rmb();
251 tx_chan = thread->tx_chan;
252 rx_chan = thread->rx_chan;
253
254 thread->srcs = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
255 if (!thread->srcs)
256 goto err_srcs;
257 for (i = 0; i < frm_cnt; i++) {
258 thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
259 if (!thread->srcs[i])
260 goto err_srcbuf;
261 }
262
263 thread->dsts = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
264 if (!thread->dsts)
265 goto err_dsts;
266 for (i = 0; i < frm_cnt; i++) {
267 thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
268 if (!thread->dsts[i])
269 goto err_dstbuf;
270 }
271
272 set_user_nice(current, 10);
273
274 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
275
276 while (!kthread_should_stop()
277 && !(iterations && total_tests >= iterations)) {
278 struct dma_device *tx_dev = tx_chan->device;
279 struct dma_device *rx_dev = rx_chan->device;
280 struct dma_async_tx_descriptor *txd = NULL;
281 struct dma_async_tx_descriptor *rxd = NULL;
282 struct completion rx_cmp, tx_cmp;
283 unsigned long rx_tmo =
284 msecs_to_jiffies(30000);
285 unsigned long tx_tmo = msecs_to_jiffies(30000);
286 u8 align = 0;
287
288 total_tests++;
289
290
291 align = tx_dev->copy_align;
292 if (rx_dev->copy_align > align)
293 align = rx_dev->copy_align;
294
295 if (1 << align > test_buf_size) {
296 pr_err("%u-byte buffer too small for %d-byte alignment\n",
297 test_buf_size, 1 << align);
298 break;
299 }
300
301 len = test_buf_size;
302 xilinx_vdmatest_init_srcs(thread->srcs, 0, len);
303 xilinx_vdmatest_init_dsts(thread->dsts, 0, len);
304
305
306 memset(&config, 0, sizeof(struct xilinx_vdma_config));
307
308
309 config.frm_cnt_en = 1;
310 config.coalesc = frm_cnt * 10;
311 config.park = 1;
312 xilinx_vdma_channel_set_config(tx_chan, &config);
313
314 config.park = 0;
315 xilinx_vdma_channel_set_config(rx_chan, &config);
316
317 for (i = 0; i < frm_cnt; i++) {
318 dma_dsts[i] = dma_map_single(rx_dev->dev,
319 thread->dsts[i],
320 test_buf_size,
321 DMA_DEV_TO_MEM);
322
323 if (dma_mapping_error(rx_dev->dev, dma_dsts[i])) {
324 failed_tests++;
325 continue;
326 }
327 xt.dst_start = dma_dsts[i];
328 xt.dir = DMA_DEV_TO_MEM;
329 xt.numf = vsize;
330 xt.sgl[0].size = hsize;
331 xt.sgl[0].icg = 0;
332 xt.frame_size = 1;
333 rxd = rx_dev->device_prep_interleaved_dma(rx_chan,
334 &xt, flags);
335 rx_cookie = rxd->tx_submit(rxd);
336 }
337
338 for (i = 0; i < frm_cnt; i++) {
339 u8 *buf = thread->srcs[i];
340
341 dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
342 DMA_MEM_TO_DEV);
343
344 if (dma_mapping_error(tx_dev->dev, dma_srcs[i])) {
345 failed_tests++;
346 continue;
347 }
348 xt.src_start = dma_srcs[i];
349 xt.dir = DMA_MEM_TO_DEV;
350 xt.numf = vsize;
351 xt.sgl[0].size = hsize;
352 xt.sgl[0].icg = 0;
353 xt.frame_size = 1;
354 txd = tx_dev->device_prep_interleaved_dma(tx_chan,
355 &xt, flags);
356 tx_cookie = txd->tx_submit(txd);
357 }
358
359 if (!rxd || !txd) {
360 for (i = 0; i < frm_cnt; i++)
361 dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
362 DMA_MEM_TO_DEV);
363 for (i = 0; i < frm_cnt; i++)
364 dma_unmap_single(rx_dev->dev, dma_dsts[i],
365 test_buf_size,
366 DMA_DEV_TO_MEM);
367 pr_warn("%s: #%u: prep error with len=0x%x ",
368 thread_name, total_tests - 1, len);
369 msleep(100);
370 failed_tests++;
371 continue;
372 }
373
374 init_completion(&rx_cmp);
375 rxd->callback = xilinx_vdmatest_slave_rx_callback;
376 rxd->callback_param = &rx_cmp;
377
378 init_completion(&tx_cmp);
379 txd->callback = xilinx_vdmatest_slave_tx_callback;
380 txd->callback_param = &tx_cmp;
381
382 if (dma_submit_error(rx_cookie) ||
383 dma_submit_error(tx_cookie)) {
384 pr_warn("%s: #%u: submit error %d/%d with len=0x%x ",
385 thread_name, total_tests - 1,
386 rx_cookie, tx_cookie, len);
387 msleep(100);
388 failed_tests++;
389 continue;
390 }
391 dma_async_issue_pending(tx_chan);
392 dma_async_issue_pending(rx_chan);
393
394 tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
395
396 status = dma_async_is_tx_complete(tx_chan, tx_cookie,
397 NULL, NULL);
398
399 if (tx_tmo == 0) {
400 pr_warn("%s: #%u: tx test timed out\n",
401 thread_name, total_tests - 1);
402 failed_tests++;
403 continue;
404 } else if (status != DMA_COMPLETE) {
405 pr_warn(
406 "%s: #%u: tx got completion callback, ",
407 thread_name, total_tests - 1);
408 pr_warn("but status is \'%s\'\n",
409 status == DMA_ERROR ? "error" :
410 "in progress");
411 failed_tests++;
412 continue;
413 }
414
415 rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
416 status = dma_async_is_tx_complete(rx_chan, rx_cookie,
417 NULL, NULL);
418
419 if (rx_tmo == 0) {
420 pr_warn("%s: #%u: rx test timed out\n",
421 thread_name, total_tests - 1);
422 failed_tests++;
423 continue;
424 } else if (status != DMA_COMPLETE) {
425 pr_warn(
426 "%s: #%u: rx got completion callback, ",
427 thread_name, total_tests - 1);
428 pr_warn("but status is \'%s\'\n",
429 status == DMA_ERROR ? "error" :
430 "in progress");
431 failed_tests++;
432 continue;
433 }
434
435
436 for (i = 0; i < frm_cnt; i++)
437 dma_unmap_single(rx_dev->dev, dma_dsts[i],
438 test_buf_size, DMA_DEV_TO_MEM);
439
440 error_count = 0;
441
442 pr_debug("%s: verifying source buffer...\n", thread_name);
443 error_count += xilinx_vdmatest_verify(thread->srcs, 0, 0,
444 0, PATTERN_SRC, true);
445 error_count += xilinx_vdmatest_verify(thread->srcs, 0,
446 len, 0, PATTERN_SRC | PATTERN_COPY, true);
447 error_count += xilinx_vdmatest_verify(thread->srcs, len,
448 test_buf_size, len, PATTERN_SRC, true);
449
450 pr_debug("%s: verifying dest buffer...\n",
451 thread->task->comm);
452 error_count += xilinx_vdmatest_verify(thread->dsts, 0, 0,
453 0, PATTERN_DST, false);
454 error_count += xilinx_vdmatest_verify(thread->dsts, 0,
455 len, 0, PATTERN_SRC | PATTERN_COPY, false);
456 error_count += xilinx_vdmatest_verify(thread->dsts, len,
457 test_buf_size, len, PATTERN_DST, false);
458
459 if (error_count) {
460 pr_warn("%s: #%u: %u errors with len=0x%x\n",
461 thread_name, total_tests - 1, error_count, len);
462 failed_tests++;
463 } else {
464 pr_debug("%s: #%u: No errors with len=0x%x\n",
465 thread_name, total_tests - 1, len);
466 }
467 }
468
469 ret = 0;
470 for (i = 0; thread->dsts[i]; i++)
471 kfree(thread->dsts[i]);
472err_dstbuf:
473 kfree(thread->dsts);
474err_dsts:
475 for (i = 0; thread->srcs[i]; i++)
476 kfree(thread->srcs[i]);
477err_srcbuf:
478 kfree(thread->srcs);
479err_srcs:
480 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
481 thread_name, total_tests, failed_tests, ret);
482
483 thread->done = true;
484 wake_up(&thread_wait);
485
486 return ret;
487}
488
489static void xilinx_vdmatest_cleanup_channel(struct xilinx_vdmatest_chan *dtc)
490{
491 struct xilinx_vdmatest_slave_thread *thread, *_thread;
492 int ret;
493
494 list_for_each_entry_safe(thread, _thread,
495 &dtc->threads, node) {
496 ret = kthread_stop(thread->task);
497 pr_info("xilinx_vdmatest: thread %s exited with status %d\n",
498 thread->task->comm, ret);
499 list_del(&thread->node);
500 put_task_struct(thread->task);
501 kfree(thread);
502 }
503 kfree(dtc);
504}
505
506static int
507xilinx_vdmatest_add_slave_threads(struct xilinx_vdmatest_chan *tx_dtc,
508 struct xilinx_vdmatest_chan *rx_dtc)
509{
510 struct xilinx_vdmatest_slave_thread *thread;
511 struct dma_chan *tx_chan = tx_dtc->chan;
512 struct dma_chan *rx_chan = rx_dtc->chan;
513
514 thread = kzalloc(sizeof(struct xilinx_vdmatest_slave_thread),
515 GFP_KERNEL);
516 if (!thread)
517 pr_warn("xilinx_vdmatest: No memory for slave thread %s-%s\n",
518 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
519
520 thread->tx_chan = tx_chan;
521 thread->rx_chan = rx_chan;
522 thread->type = (enum dma_transaction_type)DMA_SLAVE;
523
524
525
526
527 smp_wmb();
528 thread->task = kthread_run(xilinx_vdmatest_slave_func, thread, "%s-%s",
529 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
530 if (IS_ERR(thread->task)) {
531 pr_warn("xilinx_vdmatest: Failed to run thread %s-%s\n",
532 dma_chan_name(tx_chan), dma_chan_name(rx_chan));
533 kfree(thread);
534 return PTR_ERR(thread->task);
535 }
536
537 get_task_struct(thread->task);
538 list_add_tail(&thread->node, &tx_dtc->threads);
539
540
541 return 1;
542}
543
544static int xilinx_vdmatest_add_slave_channels(struct dma_chan *tx_chan,
545 struct dma_chan *rx_chan)
546{
547 struct xilinx_vdmatest_chan *tx_dtc, *rx_dtc;
548 unsigned int thread_count = 0;
549
550 tx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
551 if (!tx_dtc)
552 return -ENOMEM;
553
554 rx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
555 if (!rx_dtc)
556 return -ENOMEM;
557
558 tx_dtc->chan = tx_chan;
559 rx_dtc->chan = rx_chan;
560 INIT_LIST_HEAD(&tx_dtc->threads);
561 INIT_LIST_HEAD(&rx_dtc->threads);
562
563 xilinx_vdmatest_add_slave_threads(tx_dtc, rx_dtc);
564 thread_count += 1;
565
566 pr_info("xilinx_vdmatest: Started %u threads using %s %s\n",
567 thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
568
569 list_add_tail(&tx_dtc->node, &xilinx_vdmatest_channels);
570 list_add_tail(&rx_dtc->node, &xilinx_vdmatest_channels);
571 nr_channels += 2;
572
573 if (iterations)
574 wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
575
576 return 0;
577}
578
579static int xilinx_vdmatest_probe(struct platform_device *pdev)
580{
581 struct dma_chan *chan, *rx_chan;
582 int err;
583
584 err = of_property_read_u32(pdev->dev.of_node,
585 "xlnx,num-fstores", &frm_cnt);
586 if (err < 0) {
587 pr_err("xilinx_vdmatest: missing xlnx,num-fstores property\n");
588 return err;
589 }
590
591 chan = dma_request_slave_channel(&pdev->dev, "vdma0");
592 if (IS_ERR(chan)) {
593 pr_err("xilinx_vdmatest: No Tx channel\n");
594 return PTR_ERR(chan);
595 }
596
597 rx_chan = dma_request_slave_channel(&pdev->dev, "vdma1");
598 if (IS_ERR(rx_chan)) {
599 err = PTR_ERR(rx_chan);
600 pr_err("xilinx_vdmatest: No Rx channel\n");
601 goto free_tx;
602 }
603
604 err = xilinx_vdmatest_add_slave_channels(chan, rx_chan);
605 if (err) {
606 pr_err("xilinx_vdmatest: Unable to add channels\n");
607 goto free_rx;
608 }
609 return 0;
610
611free_rx:
612 dma_release_channel(rx_chan);
613free_tx:
614 dma_release_channel(chan);
615
616 return err;
617}
618
619static int xilinx_vdmatest_remove(struct platform_device *pdev)
620{
621 struct xilinx_vdmatest_chan *dtc, *_dtc;
622 struct dma_chan *chan;
623
624 list_for_each_entry_safe(dtc, _dtc, &xilinx_vdmatest_channels, node) {
625 list_del(&dtc->node);
626 chan = dtc->chan;
627 xilinx_vdmatest_cleanup_channel(dtc);
628 pr_info("xilinx_vdmatest: dropped channel %s\n",
629 dma_chan_name(chan));
630 dma_release_channel(chan);
631 }
632 return 0;
633}
634
/* Device-tree match table for the test client node. */
static const struct of_device_id xilinx_vdmatest_of_ids[] = {
	{ .compatible = "xlnx,axi-vdma-test-1.00.a",},
	{}
};

static struct platform_driver xilinx_vdmatest_driver = {
	.driver = {
		.name = "xilinx_vdmatest",
		/* NOTE(review): .owner is set by the platform core when
		 * registered via module_platform_driver(); this line is
		 * redundant and could be dropped. */
		.owner = THIS_MODULE,
		.of_match_table = xilinx_vdmatest_of_ids,
	},
	.probe = xilinx_vdmatest_probe,
	.remove = xilinx_vdmatest_remove,
};

/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(xilinx_vdmatest_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx AXI VDMA Test Client");
MODULE_LICENSE("GPL v2");
655