1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/delay.h>
14#include <linux/dma-mapping.h>
15#include <linux/dmaengine.h>
16#include <linux/freezer.h>
17#include <linux/init.h>
18#include <linux/kthread.h>
19#include <linux/sched/task.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/random.h>
23#include <linux/slab.h>
24#include <linux/wait.h>
25
/*
 * Module parameters.  All are writable at runtime via sysfs; a test run
 * snapshots them into struct dmatest_params when it starts, so changes
 * only take effect on the next run.
 */
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int sg_buffers = 1;
module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
		"Number of scatter gather buffers (default: 1)");

/* Selects which of the mutually-exclusive copy-style tests runs. */
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

/* NOTE(review): declared int but registered as uint — follows upstream. */
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
91
92
93
94
95
96
97
98
99
100
101
102
103
/*
 * struct dmatest_params - frozen copy of the module parameters for one run.
 *
 * Snapshotted from the writable module parameters when a run starts so
 * sysfs writes cannot change a test already in flight.
 */
struct dmatest_params {
	unsigned int	buf_size;	/* size of the test buffers */
	char		channel[20];	/* bus ID of channel to test, "" = any */
	char		device[32];	/* bus ID of DMA engine to test, "" = any */
	unsigned int	threads_per_chan; /* kthreads started per channel */
	unsigned int	max_channels;	/* max channels to claim, 0 = all */
	unsigned int	iterations;	/* tests per thread, 0 = infinite */
	unsigned int	xor_sources;	/* number of xor source buffers */
	unsigned int	pq_sources;	/* number of p+q source buffers */
	int		timeout;	/* transfer timeout in msec, -1 = infinite */
	bool		noverify;	/* skip data verification */
	bool		norandom;	/* use fixed (0) offsets and full length */
};
117
118
119
120
121
122
/*
 * Global test context.
 * @params:      parameters frozen at run start
 * @channels:    list of dmatest_chan, one per claimed DMA channel
 * @nr_channels: number of entries on @channels
 * @lock:        serializes run start/stop against the "run" parameter
 * @did_init:    set once module init finished; gates restarts
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};
136
/*
 * The "run" parameter uses custom ops: writing true (re)starts the test,
 * reading reports whether any threads are still running.
 */
static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
146
147
/* Maximum number of mismatched bytes reported per verify pass */
#define MAX_ERROR_COUNT		32

/*
 * Buffer byte layout (one byte per buffer position):
 *
 * Bit 7 (PATTERN_SRC) is set in every source-buffer byte and clear in
 * every destination-buffer byte.  Bit 6 (PATTERN_COPY) marks bytes the
 * DMA engine is expected to copy; bit 5 (PATTERN_OVERWRITE) marks
 * destination bytes the engine is expected to overwrite.  The low
 * PATTERN_COUNT_MASK bits hold the inverse of a per-byte counter
 * (see gen_inv_idx()), or of PATTERN_MEMSET_IDX for memset tests.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01
167
168
/* Completion token shared between a test thread and its DMA callback. */
struct dmatest_done {
	bool			done;	/* set by dmatest_callback() */
	wait_queue_head_t	*wait;	/* points at the thread's done_wait */
};

/* Per-thread state; one kthread per entry, owned by a dmatest_chan. */
struct dmatest_thread {
	struct list_head	node;	/* on dmatest_chan->threads */
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;	 /* aligned source buffers, NULL-terminated */
	u8			**usrcs; /* raw allocations backing srcs */
	u8			**dsts;	 /* aligned destination buffers, NULL-terminated */
	u8			**udsts; /* raw allocations backing dsts */
	enum dma_transaction_type type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;	/* thread finished; callback after this warns */
};

/* One claimed DMA channel plus the test threads running on it. */
struct dmatest_chan {
	struct list_head	node;	/* on dmatest_info->channels */
	struct dma_chan		*chan;
	struct list_head	threads;
};

/* Woken whenever a test thread finishes; used by "wait" and "run" getters. */
static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;
197
198static bool is_threaded_test_run(struct dmatest_info *info)
199{
200 struct dmatest_chan *dtc;
201
202 list_for_each_entry(dtc, &info->channels, node) {
203 struct dmatest_thread *thread;
204
205 list_for_each_entry(thread, &dtc->threads, node) {
206 if (!thread->done)
207 return true;
208 }
209 }
210
211 return false;
212}
213
214static int dmatest_wait_get(char *val, const struct kernel_param *kp)
215{
216 struct dmatest_info *info = &test_info;
217 struct dmatest_params *params = &info->params;
218
219 if (params->iterations)
220 wait_event(thread_wait, !is_threaded_test_run(info));
221 wait = true;
222 return param_get_bool(val, kp);
223}
224
/* "wait" parameter: reading it blocks until all test threads are done. */
static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
231
232static bool dmatest_match_channel(struct dmatest_params *params,
233 struct dma_chan *chan)
234{
235 if (params->channel[0] == '\0')
236 return true;
237 return strcmp(dma_chan_name(chan), params->channel) == 0;
238}
239
240static bool dmatest_match_device(struct dmatest_params *params,
241 struct dma_device *device)
242{
243 if (params->device[0] == '\0')
244 return true;
245 return strcmp(dev_name(device->dev), params->device) == 0;
246}
247
/* Return a pseudo-random unsigned long for lengths and offsets. */
static unsigned long dmatest_random(void)
{
	unsigned long rnd;

	prandom_bytes(&rnd, sizeof(rnd));
	return rnd;
}
255
256static inline u8 gen_inv_idx(u8 index, bool is_memset)
257{
258 u8 val = is_memset ? PATTERN_MEMSET_IDX : index;
259
260 return ~val & PATTERN_COUNT_MASK;
261}
262
263static inline u8 gen_src_value(u8 index, bool is_memset)
264{
265 return PATTERN_SRC | gen_inv_idx(index, is_memset);
266}
267
268static inline u8 gen_dst_value(u8 index, bool is_memset)
269{
270 return PATTERN_DST | gen_inv_idx(index, is_memset);
271}
272
273static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
274 unsigned int buf_size, bool is_memset)
275{
276 unsigned int i;
277 u8 *buf;
278
279 for (; (buf = *bufs); bufs++) {
280 for (i = 0; i < start; i++)
281 buf[i] = gen_src_value(i, is_memset);
282 for ( ; i < start + len; i++)
283 buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
284 for ( ; i < buf_size; i++)
285 buf[i] = gen_src_value(i, is_memset);
286 buf++;
287 }
288}
289
290static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
291 unsigned int buf_size, bool is_memset)
292{
293 unsigned int i;
294 u8 *buf;
295
296 for (; (buf = *bufs); bufs++) {
297 for (i = 0; i < start; i++)
298 buf[i] = gen_dst_value(i, is_memset);
299 for ( ; i < start + len; i++)
300 buf[i] = gen_dst_value(i, is_memset) |
301 PATTERN_OVERWRITE;
302 for ( ; i < buf_size; i++)
303 buf[i] = gen_dst_value(i, is_memset);
304 }
305}
306
/*
 * Report a single mismatched byte, classifying the failure from the
 * pattern bits: a source byte that changed, a copy-region byte that was
 * not copied, an untouched byte that was unexpectedly copied, or a
 * plain value mismatch.
 */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		/* copy/overwrite marker bits missing: DMA never wrote here */
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		/* source bit present in a region that should be untouched */
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}
328
329static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
330 unsigned int end, unsigned int counter, u8 pattern,
331 bool is_srcbuf, bool is_memset)
332{
333 unsigned int i;
334 unsigned int error_count = 0;
335 u8 actual;
336 u8 expected;
337 u8 *buf;
338 unsigned int counter_orig = counter;
339
340 for (; (buf = *bufs); bufs++) {
341 counter = counter_orig;
342 for (i = start; i < end; i++) {
343 actual = buf[i];
344 expected = pattern | gen_inv_idx(counter, is_memset);
345 if (actual != expected) {
346 if (error_count < MAX_ERROR_COUNT)
347 dmatest_mismatch(actual, pattern, i,
348 counter, is_srcbuf,
349 is_memset);
350 error_count++;
351 }
352 counter++;
353 }
354 }
355
356 if (error_count > MAX_ERROR_COUNT)
357 pr_warn("%s: %u errors suppressed\n",
358 current->comm, error_count - MAX_ERROR_COUNT);
359
360 return error_count;
361}
362
363
/* DMA completion callback: flag the transfer done and wake the test thread. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, this callback fired after the parent
		 * thread already cleaned up — e.g. when the driver does
		 * not implement terminate_all() and the operation
		 * completed only after the timeout.  The buffers the
		 * engine wrote may already be freed, so warn loudly.
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}
383
/* Largest odd number not exceeding min(x, y). */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return (val & 1) ? val : val - 1;
}
390
/*
 * Log one per-test result at info level.  @data carries the detail
 * value (error count, errno, or 0).
 */
static void result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}
397
/* Same as result(), but only at debug log level. */
static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		unsigned int dst_off, unsigned int len,
		unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}
405
/*
 * Log a result: with "verbose" set it goes to the normal log via
 * result(), otherwise only to the debug log via dbg_result().
 */
#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})
412
/*
 * Convert a count of events over @runtime microseconds into a per-second
 * rate.  do_div() needs a 32-bit divisor, so precision is traded by
 * shifting runtime down (and scaling per_sec up) until runtime fits.
 */
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	do_div(per_sec, runtime);
	return per_sec;
}
430
/* Throughput in KB/s: @len total bytes moved over @runtime microseconds. */
static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return dmatest_persec(runtime, len >> 10);
}
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450static int dmatest_func(void *data)
451{
452 struct dmatest_thread *thread = data;
453 struct dmatest_done *done = &thread->test_done;
454 struct dmatest_info *info;
455 struct dmatest_params *params;
456 struct dma_chan *chan;
457 struct dma_device *dev;
458 unsigned int error_count;
459 unsigned int failed_tests = 0;
460 unsigned int total_tests = 0;
461 dma_cookie_t cookie;
462 enum dma_status status;
463 enum dma_ctrl_flags flags;
464 u8 *pq_coefs = NULL;
465 int ret;
466 int src_cnt;
467 int dst_cnt;
468 int i;
469 ktime_t ktime, start, diff;
470 ktime_t filltime = 0;
471 ktime_t comparetime = 0;
472 s64 runtime = 0;
473 unsigned long long total_len = 0;
474 u8 align = 0;
475 bool is_memset = false;
476 dma_addr_t *srcs;
477 dma_addr_t *dma_pq;
478
479 set_freezable();
480
481 ret = -ENOMEM;
482
483 smp_rmb();
484 info = thread->info;
485 params = &info->params;
486 chan = thread->chan;
487 dev = chan->device;
488 if (thread->type == DMA_MEMCPY) {
489 align = dev->copy_align;
490 src_cnt = dst_cnt = 1;
491 } else if (thread->type == DMA_MEMSET) {
492 align = dev->fill_align;
493 src_cnt = dst_cnt = 1;
494 is_memset = true;
495 } else if (thread->type == DMA_SG) {
496 align = dev->copy_align;
497 src_cnt = dst_cnt = sg_buffers;
498 } else if (thread->type == DMA_XOR) {
499
500 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
501 dst_cnt = 1;
502 align = dev->xor_align;
503 } else if (thread->type == DMA_PQ) {
504
505 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
506 dst_cnt = 2;
507 align = dev->pq_align;
508
509 pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
510 if (!pq_coefs)
511 goto err_thread_type;
512
513 for (i = 0; i < src_cnt; i++)
514 pq_coefs[i] = 1;
515 } else
516 goto err_thread_type;
517
518 thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
519 if (!thread->srcs)
520 goto err_srcs;
521
522 thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
523 if (!thread->usrcs)
524 goto err_usrcs;
525
526 for (i = 0; i < src_cnt; i++) {
527 thread->usrcs[i] = kmalloc(params->buf_size + align,
528 GFP_KERNEL);
529 if (!thread->usrcs[i])
530 goto err_srcbuf;
531
532
533 if (align)
534 thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
535 else
536 thread->srcs[i] = thread->usrcs[i];
537 }
538 thread->srcs[i] = NULL;
539
540 thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
541 if (!thread->dsts)
542 goto err_dsts;
543
544 thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
545 if (!thread->udsts)
546 goto err_udsts;
547
548 for (i = 0; i < dst_cnt; i++) {
549 thread->udsts[i] = kmalloc(params->buf_size + align,
550 GFP_KERNEL);
551 if (!thread->udsts[i])
552 goto err_dstbuf;
553
554
555 if (align)
556 thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
557 else
558 thread->dsts[i] = thread->udsts[i];
559 }
560 thread->dsts[i] = NULL;
561
562 set_user_nice(current, 10);
563
564 srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
565 if (!srcs)
566 goto err_dstbuf;
567
568 dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
569 if (!dma_pq)
570 goto err_srcs_array;
571
572
573
574
575 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
576
577 ktime = ktime_get();
578 while (!kthread_should_stop()
579 && !(params->iterations && total_tests >= params->iterations)) {
580 struct dma_async_tx_descriptor *tx = NULL;
581 struct dmaengine_unmap_data *um;
582 dma_addr_t *dsts;
583 unsigned int src_off, dst_off, len;
584 struct scatterlist tx_sg[src_cnt];
585 struct scatterlist rx_sg[src_cnt];
586
587 total_tests++;
588
589
590 if ((src_cnt + dst_cnt) >= 255) {
591 pr_err("too many buffers (%d of 255 supported)\n",
592 src_cnt + dst_cnt);
593 break;
594 }
595
596 if (1 << align > params->buf_size) {
597 pr_err("%u-byte buffer too small for %d-byte alignment\n",
598 params->buf_size, 1 << align);
599 break;
600 }
601
602 align = 3;
603
604 if (params->norandom)
605 len = params->buf_size;
606 else
607 len = dmatest_random() % params->buf_size + 1;
608
609 len = (len >> align) << align;
610 if (!len)
611 len = 1 << align;
612
613 total_len += len;
614
615 if (params->norandom) {
616 src_off = 0;
617 dst_off = 0;
618 } else {
619 src_off = dmatest_random() % (params->buf_size - len + 1);
620 dst_off = dmatest_random() % (params->buf_size - len + 1);
621
622 src_off = (src_off >> align) << align;
623 dst_off = (dst_off >> align) << align;
624 }
625
626 if (!params->noverify) {
627 start = ktime_get();
628 dmatest_init_srcs(thread->srcs, src_off, len,
629 params->buf_size, is_memset);
630 dmatest_init_dsts(thread->dsts, dst_off, len,
631 params->buf_size, is_memset);
632
633 diff = ktime_sub(ktime_get(), start);
634 filltime = ktime_add(filltime, diff);
635 }
636
637 um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
638 GFP_KERNEL);
639 if (!um) {
640 failed_tests++;
641 result("unmap data NULL", total_tests,
642 src_off, dst_off, len, ret);
643 continue;
644 }
645
646 um->len = params->buf_size;
647 for (i = 0; i < src_cnt; i++) {
648 void *buf = thread->srcs[i];
649 struct page *pg = virt_to_page(buf);
650 unsigned long pg_off = offset_in_page(buf);
651
652 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
653 um->len, DMA_TO_DEVICE);
654 srcs[i] = um->addr[i] + src_off;
655 ret = dma_mapping_error(dev->dev, um->addr[i]);
656 if (ret) {
657 dmaengine_unmap_put(um);
658 result("src mapping error", total_tests,
659 src_off, dst_off, len, ret);
660 failed_tests++;
661 continue;
662 }
663 um->to_cnt++;
664 }
665
666 dsts = &um->addr[src_cnt];
667 for (i = 0; i < dst_cnt; i++) {
668 void *buf = thread->dsts[i];
669 struct page *pg = virt_to_page(buf);
670 unsigned long pg_off = offset_in_page(buf);
671
672 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
673 DMA_BIDIRECTIONAL);
674 ret = dma_mapping_error(dev->dev, dsts[i]);
675 if (ret) {
676 dmaengine_unmap_put(um);
677 result("dst mapping error", total_tests,
678 src_off, dst_off, len, ret);
679 failed_tests++;
680 continue;
681 }
682 um->bidi_cnt++;
683 }
684
685 sg_init_table(tx_sg, src_cnt);
686 sg_init_table(rx_sg, src_cnt);
687 for (i = 0; i < src_cnt; i++) {
688 sg_dma_address(&rx_sg[i]) = srcs[i];
689 sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
690 sg_dma_len(&tx_sg[i]) = len;
691 sg_dma_len(&rx_sg[i]) = len;
692 }
693
694 if (thread->type == DMA_MEMCPY)
695 tx = dev->device_prep_dma_memcpy(chan,
696 dsts[0] + dst_off,
697 srcs[0], len, flags);
698 else if (thread->type == DMA_MEMSET)
699 tx = dev->device_prep_dma_memset(chan,
700 dsts[0] + dst_off,
701 *(thread->srcs[0] + src_off),
702 len, flags);
703 else if (thread->type == DMA_SG)
704 tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
705 rx_sg, src_cnt, flags);
706 else if (thread->type == DMA_XOR)
707 tx = dev->device_prep_dma_xor(chan,
708 dsts[0] + dst_off,
709 srcs, src_cnt,
710 len, flags);
711 else if (thread->type == DMA_PQ) {
712 for (i = 0; i < dst_cnt; i++)
713 dma_pq[i] = dsts[i] + dst_off;
714 tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
715 src_cnt, pq_coefs,
716 len, flags);
717 }
718
719 if (!tx) {
720 dmaengine_unmap_put(um);
721 result("prep error", total_tests, src_off,
722 dst_off, len, ret);
723 msleep(100);
724 failed_tests++;
725 continue;
726 }
727
728 done->done = false;
729 tx->callback = dmatest_callback;
730 tx->callback_param = done;
731 cookie = tx->tx_submit(tx);
732
733 if (dma_submit_error(cookie)) {
734 dmaengine_unmap_put(um);
735 result("submit error", total_tests, src_off,
736 dst_off, len, ret);
737 msleep(100);
738 failed_tests++;
739 continue;
740 }
741 dma_async_issue_pending(chan);
742
743 wait_event_freezable_timeout(thread->done_wait, done->done,
744 msecs_to_jiffies(params->timeout));
745
746 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
747
748 if (!done->done) {
749 dmaengine_unmap_put(um);
750 result("test timed out", total_tests, src_off, dst_off,
751 len, 0);
752 failed_tests++;
753 continue;
754 } else if (status != DMA_COMPLETE) {
755 dmaengine_unmap_put(um);
756 result(status == DMA_ERROR ?
757 "completion error status" :
758 "completion busy status", total_tests, src_off,
759 dst_off, len, ret);
760 failed_tests++;
761 continue;
762 }
763
764 dmaengine_unmap_put(um);
765
766 if (params->noverify) {
767 verbose_result("test passed", total_tests, src_off,
768 dst_off, len, 0);
769 continue;
770 }
771
772 start = ktime_get();
773 pr_debug("%s: verifying source buffer...\n", current->comm);
774 error_count = dmatest_verify(thread->srcs, 0, src_off,
775 0, PATTERN_SRC, true, is_memset);
776 error_count += dmatest_verify(thread->srcs, src_off,
777 src_off + len, src_off,
778 PATTERN_SRC | PATTERN_COPY, true, is_memset);
779 error_count += dmatest_verify(thread->srcs, src_off + len,
780 params->buf_size, src_off + len,
781 PATTERN_SRC, true, is_memset);
782
783 pr_debug("%s: verifying dest buffer...\n", current->comm);
784 error_count += dmatest_verify(thread->dsts, 0, dst_off,
785 0, PATTERN_DST, false, is_memset);
786
787 error_count += dmatest_verify(thread->dsts, dst_off,
788 dst_off + len, src_off,
789 PATTERN_SRC | PATTERN_COPY, false, is_memset);
790
791 error_count += dmatest_verify(thread->dsts, dst_off + len,
792 params->buf_size, dst_off + len,
793 PATTERN_DST, false, is_memset);
794
795 diff = ktime_sub(ktime_get(), start);
796 comparetime = ktime_add(comparetime, diff);
797
798 if (error_count) {
799 result("data error", total_tests, src_off, dst_off,
800 len, error_count);
801 failed_tests++;
802 } else {
803 verbose_result("test passed", total_tests, src_off,
804 dst_off, len, 0);
805 }
806 }
807 ktime = ktime_sub(ktime_get(), ktime);
808 ktime = ktime_sub(ktime, comparetime);
809 ktime = ktime_sub(ktime, filltime);
810 runtime = ktime_to_us(ktime);
811
812 ret = 0;
813 kfree(dma_pq);
814err_srcs_array:
815 kfree(srcs);
816err_dstbuf:
817 for (i = 0; thread->udsts[i]; i++)
818 kfree(thread->udsts[i]);
819 kfree(thread->udsts);
820err_udsts:
821 kfree(thread->dsts);
822err_dsts:
823err_srcbuf:
824 for (i = 0; thread->usrcs[i]; i++)
825 kfree(thread->usrcs[i]);
826 kfree(thread->usrcs);
827err_usrcs:
828 kfree(thread->srcs);
829err_srcs:
830 kfree(pq_coefs);
831err_thread_type:
832 pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
833 current->comm, total_tests, failed_tests,
834 dmatest_persec(runtime, total_tests),
835 dmatest_KBs(runtime, total_len), ret);
836
837
838 if (ret || failed_tests)
839 dmaengine_terminate_all(chan);
840
841 thread->done = true;
842 wake_up(&thread_wait);
843
844 return ret;
845}
846
847static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
848{
849 struct dmatest_thread *thread;
850 struct dmatest_thread *_thread;
851 int ret;
852
853 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
854 ret = kthread_stop(thread->task);
855 pr_debug("thread %s exited with status %d\n",
856 thread->task->comm, ret);
857 list_del(&thread->node);
858 put_task_struct(thread->task);
859 kfree(thread);
860 }
861
862
863 dmaengine_terminate_all(dtc->chan);
864
865 kfree(dtc);
866}
867
868static int dmatest_add_threads(struct dmatest_info *info,
869 struct dmatest_chan *dtc, enum dma_transaction_type type)
870{
871 struct dmatest_params *params = &info->params;
872 struct dmatest_thread *thread;
873 struct dma_chan *chan = dtc->chan;
874 char *op;
875 unsigned int i;
876
877 if (type == DMA_MEMCPY)
878 op = "copy";
879 else if (type == DMA_MEMSET)
880 op = "set";
881 else if (type == DMA_SG)
882 op = "sg";
883 else if (type == DMA_XOR)
884 op = "xor";
885 else if (type == DMA_PQ)
886 op = "pq";
887 else
888 return -EINVAL;
889
890 for (i = 0; i < params->threads_per_chan; i++) {
891 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
892 if (!thread) {
893 pr_warn("No memory for %s-%s%u\n",
894 dma_chan_name(chan), op, i);
895 break;
896 }
897 thread->info = info;
898 thread->chan = dtc->chan;
899 thread->type = type;
900 thread->test_done.wait = &thread->done_wait;
901 init_waitqueue_head(&thread->done_wait);
902 smp_wmb();
903 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
904 dma_chan_name(chan), op, i);
905 if (IS_ERR(thread->task)) {
906 pr_warn("Failed to create thread %s-%s%u\n",
907 dma_chan_name(chan), op, i);
908 kfree(thread);
909 break;
910 }
911
912
913 get_task_struct(thread->task);
914 list_add_tail(&thread->node, &dtc->threads);
915 wake_up_process(thread->task);
916 }
917
918 return i;
919}
920
921static int dmatest_add_channel(struct dmatest_info *info,
922 struct dma_chan *chan)
923{
924 struct dmatest_chan *dtc;
925 struct dma_device *dma_dev = chan->device;
926 unsigned int thread_count = 0;
927 int cnt;
928
929 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
930 if (!dtc) {
931 pr_warn("No memory for %s\n", dma_chan_name(chan));
932 return -ENOMEM;
933 }
934
935 dtc->chan = chan;
936 INIT_LIST_HEAD(&dtc->threads);
937
938 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
939 if (dmatest == 0) {
940 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
941 thread_count += cnt > 0 ? cnt : 0;
942 }
943 }
944
945 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
946 if (dmatest == 2) {
947 cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
948 thread_count += cnt > 0 ? cnt : 0;
949 }
950 }
951
952 if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
953 if (dmatest == 1) {
954 cnt = dmatest_add_threads(info, dtc, DMA_SG);
955 thread_count += cnt > 0 ? cnt : 0;
956 }
957 }
958
959 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
960 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
961 thread_count += cnt > 0 ? cnt : 0;
962 }
963 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
964 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
965 thread_count += cnt > 0 ? cnt : 0;
966 }
967
968 pr_info("Started %u threads using %s\n",
969 thread_count, dma_chan_name(chan));
970
971 list_add_tail(&dtc->node, &info->channels);
972 info->nr_channels++;
973
974 return 0;
975}
976
977static bool filter(struct dma_chan *chan, void *param)
978{
979 struct dmatest_params *params = param;
980
981 if (!dmatest_match_channel(params, chan) ||
982 !dmatest_match_device(params, chan->device))
983 return false;
984 else
985 return true;
986}
987
988static void request_channels(struct dmatest_info *info,
989 enum dma_transaction_type type)
990{
991 dma_cap_mask_t mask;
992
993 dma_cap_zero(mask);
994 dma_cap_set(type, mask);
995 for (;;) {
996 struct dmatest_params *params = &info->params;
997 struct dma_chan *chan;
998
999 chan = dma_request_channel(mask, filter, params);
1000 if (chan) {
1001 if (dmatest_add_channel(info, chan)) {
1002 dma_release_channel(chan);
1003 break;
1004 }
1005 } else
1006 break;
1007 if (params->max_channels &&
1008 info->nr_channels >= params->max_channels)
1009 break;
1010 }
1011}
1012
/*
 * Snapshot the current module parameters into info->params, then claim
 * channels and start test threads for every supported transaction type.
 * Caller holds info->lock.
 */
static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_SG);
	request_channels(info, DMA_PQ);
}
1036
1037static void stop_threaded_test(struct dmatest_info *info)
1038{
1039 struct dmatest_chan *dtc, *_dtc;
1040 struct dma_chan *chan;
1041
1042 list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
1043 list_del(&dtc->node);
1044 chan = dtc->chan;
1045 dmatest_cleanup_channel(dtc);
1046 pr_debug("dropped channel %s\n", dma_chan_name(chan));
1047 dma_release_channel(chan);
1048 }
1049
1050 info->nr_channels = 0;
1051}
1052
/*
 * Stop any running test and start a fresh one with the current module
 * parameters.  Caller holds info->lock.
 * (The @run argument is unused here; the caller only invokes this when
 * run == true.)
 */
static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early before run init */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}
1067
1068static int dmatest_run_get(char *val, const struct kernel_param *kp)
1069{
1070 struct dmatest_info *info = &test_info;
1071
1072 mutex_lock(&info->lock);
1073 if (is_threaded_test_run(info)) {
1074 dmatest_run = true;
1075 } else {
1076 stop_threaded_test(info);
1077 dmatest_run = false;
1078 }
1079 mutex_unlock(&info->lock);
1080
1081 return param_get_bool(val, kp);
1082}
1083
1084static int dmatest_run_set(const char *val, const struct kernel_param *kp)
1085{
1086 struct dmatest_info *info = &test_info;
1087 int ret;
1088
1089 mutex_lock(&info->lock);
1090 ret = param_set_bool(val, kp);
1091 if (ret) {
1092 mutex_unlock(&info->lock);
1093 return ret;
1094 }
1095
1096 if (is_threaded_test_run(info))
1097 ret = -EBUSY;
1098 else if (dmatest_run)
1099 restart_threaded_test(info, dmatest_run);
1100
1101 mutex_unlock(&info->lock);
1102
1103 return ret;
1104}
1105
/* Module init: optionally start a run and honor wait=1 semantics. */
static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	/* run=1 on the command line starts a test immediately */
	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* with a finite iteration count, wait=1 blocks init until done */
	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/*
	 * module parameters are stable and init-time tests are started;
	 * from here on userspace controls the test via "run"
	 */
	info->did_init = true;

	return 0;
}

/* late_initcall so other (DMA driver) modules are loaded first */
late_initcall(dmatest_init);
1129
1130static void __exit dmatest_exit(void)
1131{
1132 struct dmatest_info *info = &test_info;
1133
1134 mutex_lock(&info->lock);
1135 stop_threaded_test(info);
1136 mutex_unlock(&info->lock);
1137}
1138module_exit(dmatest_exit);
1139
1140MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1141MODULE_LICENSE("GPL v2");
1142