// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		    S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static unsigned int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec
 * @noverify:		disable data verification
 * @norandom:		disable random offset setup
 * @alignment:		custom data address alignment taken as 2^alignment
 * @transfer_size:	custom transfer size in bytes
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	unsigned int	timeout;
	bool		noverify;
	bool		norandom;
	int		alignment;
	unsigned int	transfer_size;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @channels:		channels under test
 * @nr_channels:	number of channels under test
 * @lock:		access protection to the fields of this structure
 * @did_init:		whether the module has finished initializing
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = 20,
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = dmatest_test_list_get,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list");
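
/*
 * Typical runtime usage through the parameters defined above (an
 * illustrative sketch; the channel name is an example, the paths follow
 * the standard sysfs layout for module parameters):
 *
 *	% echo dma0chan0 > /sys/module/dmatest/parameters/channel
 *	% echo 2000 > /sys/module/dmatest/parameters/timeout
 *	% echo 1 > /sys/module/dmatest/parameters/iterations
 *	% echo 1 > /sys/module/dmatest/parameters/run
 *
 * With "iterations" set, reading the "wait" parameter blocks until the
 * configured number of test iterations has completed.
 */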

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01
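
/*
 * Worked example of the scheme above (illustrative): the source byte at
 * index 5 inside the to-be-copied area is
 * PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK) = 0x80 | 0x40 | 0x1a
 * = 0xda, and the matching destination byte before the transfer is
 * PATTERN_DST | PATTERN_OVERWRITE | 0x1a = 0x3a.
 */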

/* Fixed point arithmetic ops */
#define FIXPT_SHIFT		8
#define FIXPNT_MASK		0xFF
#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
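
/*
 * Example (illustrative): 2.5 in this 8.8 fixed-point format is 640 (0x280);
 * FIXPT_TO_INT(640) == 2 and FIXPT_GET_FRAC(640) == (0x80 * 100) >> 8 == 50,
 * so the value prints as "2.50".
 */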

struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

struct dmatest_data {
	u8		**raw;
	u8		**aligned;
	unsigned int	cnt;
	unsigned int	off;
};

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	struct dmatest_data	src;
	struct dmatest_data	dst;
	enum dma_transaction_type type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;
	bool			pending;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static bool is_threaded_test_pending(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (thread->pending)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}
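
/*
 * Example (illustrative): gen_inv_idx(5, false) == (~5 & 0x1f) == 0x1a. In
 * memset mode every byte uses PATTERN_MEMSET_IDX instead of its own index,
 * so the whole buffer carries one repeated value, as a memset should produce.
 */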

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, this callback occurred after the parent
		 * thread has already cleaned up. This can happen if the
		 * driver doesn't implement the terminate_all() functionality
		 * and a DMA operation did not occur within the timeout
		 * period.
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}
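
/*
 * Example (illustrative): min_odd(8, 6) == 5, the smaller value rounded down
 * to the nearest odd number. The XOR and PQ tests below force an odd source
 * count because XOR-ing an odd number of identically initialized sources
 * reproduces the source pattern, letting the destination be verified against
 * PATTERN_SRC | PATTERN_COPY.
 */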

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	per_sec = INT_TO_FIXPT(per_sec);
	do_div(per_sec, runtime);

	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}
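
/*
 * Sanity example (illustrative): moving 4 MiB of data in a runtime of
 * 1000000 us gives dmatest_persec(1000000, 4096) == 4096 << FIXPT_SHIFT,
 * so dmatest_KBs() reports 4096 KB/s, i.e. 4 MiB in one second.
 */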

static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++)
		kfree(d->raw[i]);

	kfree(d->aligned);
	kfree(d->raw);
}

static void dmatest_free_test_data(struct dmatest_data *d)
{
	__dmatest_free_test_data(d, d->cnt);
}

static int dmatest_alloc_test_data(struct dmatest_data *d,
		unsigned int buf_size, u8 align)
{
	unsigned int i = 0;

	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->raw)
		return -ENOMEM;

	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->aligned)
		goto err;

	for (i = 0; i < d->cnt; i++) {
		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
		if (!d->raw[i])
			goto err;

		/* align to alignment restriction */
		if (align)
			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
		else
			d->aligned[i] = d->raw[i];
	}

	return 0;
err:
	__dmatest_free_test_data(d, i);
	return -ENOMEM;
}
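
/*
 * Note: PTR_ALIGN() rounds a pointer up to the next multiple of its second
 * argument, so each raw buffer above over-allocates 'align' extra bytes to
 * guarantee that buf_size usable bytes remain after the rounding.
 */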

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread *thread = data;
	struct dmatest_done *done = &thread->test_done;
	struct dmatest_info *info;
	struct dmatest_params *params;
	struct dma_chan *chan;
	struct dma_device *dev;
	unsigned int error_count;
	unsigned int failed_tests = 0;
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
	unsigned int buf_size;
	struct dmatest_data *src;
	struct dmatest_data *dst;
	int i;
	ktime_t ktime, start, diff;
	ktime_t filltime = 0;
	ktime_t comparetime = 0;
	s64 runtime = 0;
	unsigned long long total_len = 0;
	unsigned long long iops = 0;
	u8 align = 0;
	bool is_memset = false;
	dma_addr_t *srcs;
	dma_addr_t *dma_pq;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	thread->pending = false;
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	src = &thread->src;
	dst = &thread->dst;
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* Check if buffer count fits into map count variable (u8) */
	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}

	buf_size = params->buf_size;
	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
		       buf_size, 1 << align);
		goto err_free_coefs;
	}

	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		goto err_free_coefs;

	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
		goto err_src;

	set_user_nice(current, 10);

	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
		goto err_dst;

	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
		unsigned int len;

		total_tests++;

		if (params->transfer_size) {
			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-byte buffer size\n",
				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
			len = buf_size;
		} else {
			len = dmatest_random() % buf_size + 1;
		}

		/* Do not alter a transfer size explicitly defined by the user */
		if (!params->transfer_size) {
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
		}
		total_len += len;

		if (params->norandom) {
			src->off = 0;
			dst->off = 0;
		} else {
			src->off = dmatest_random() % (buf_size - len + 1);
			dst->off = dmatest_random() % (buf_size - len + 1);

			src->off = (src->off >> align) << align;
			dst->off = (dst->off >> align) << align;
		}
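
		/*
		 * Example (illustrative): with align == 6 (64-byte alignment)
		 * a random offset of 100 is rounded down to 64 by the shift
		 * pair above, keeping both offsets multiples of 1 << align.
		 */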

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(src->aligned, src->off, len,
					  buf_size, is_memset);
			dmatest_init_dsts(dst->aligned, dst->off, len,
					  buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src->off, dst->off, len, ret);
			continue;
		}

		um->len = buf_size;
		for (i = 0; i < src->cnt; i++) {
			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				result("src mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->to_cnt++;
		}

		dsts = &um->addr[src->cnt];
		for (i = 0; i < dst->cnt; i++) {
			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->bidi_cnt++;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst->off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst->off,
						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst->off,
						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			for (i = 0; i < dst->cnt; i++)
				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src->cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			result("prep error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		done->done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(thread->done_wait, done->done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done->done) {
			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			goto error_unmap_continue;
		} else if (status != DMA_COMPLETE) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src->off,
			       dst->off, len, ret);
			goto error_unmap_continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
			continue;
		}

		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off,
				dst->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off + len,
				buf_size, dst->off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src->off, dst->off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
		}

		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
	kfree(dma_pq);
err_srcs_array:
	kfree(srcs);
err_dst:
	dmatest_free_test_data(dst);
err_src:
	dmatest_free_test_data(src);
err_free_coefs:
	kfree(pq_coefs);
err_thread_type:
	iops = dmatest_persec(runtime, total_tests);
	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		dmaengine_terminate_sync(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_sync(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		thread->pending = true;
	}

	return i;
}
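
/*
 * The kthreads created above are named "%s-%s%u", e.g. "dma0chan0-copy0"
 * (channel name, operation, thread index); this is also the current->comm
 * prefix seen in the per-thread result logs (channel name illustrative).
 */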

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Added %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void add_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;
	params->alignment = alignment;
	params->transfer_size = transfer_size;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void run_pending_tests(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			wake_up_process(thread->task);
			thread_count++;
		}
		pr_info("Started %u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void start_threaded_tests(struct dmatest_info *info)
{
	/* we might be called early before fully initialized,
	 * honor module init and module load messages ordering
	 */
	if (!info->did_init)
		return;

	run_pending_tests(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		if (!is_threaded_test_pending(info))
			stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	} else if (dmatest_run) {
		if (is_threaded_test_pending(info))
			start_threaded_tests(info);
		else
			pr_info("Could not start test, no channels configured\n");
	} else {
		stop_threaded_test(info);
	}

	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	char chan_reset_val[20];
	int ret = 0;

	mutex_lock(&info->lock);
	ret = param_set_copystring(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}
	/* Clear any previously run threads */
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
		stop_threaded_test(info);
	/* Reject channels that are already registered */
	if (is_threaded_test_pending(info)) {
		list_for_each_entry(dtc, &info->channels, node) {
			if (strcmp(dma_chan_name(dtc->chan),
				   strim(test_channel)) == 0) {
				dtc = list_last_entry(&info->channels,
						      struct dmatest_chan,
						      node);
				strlcpy(chan_reset_val,
					dma_chan_name(dtc->chan),
					sizeof(chan_reset_val));
				ret = -EBUSY;
				goto add_chan_err;
			}
		}
	}

	add_threaded_test(info);

	/* Check if channel was added successfully */
	dtc = list_last_entry(&info->channels, struct dmatest_chan, node);

	if (dtc->chan) {
		/*
		 * If the new channel was not successfully added, revert the
		 * "test_channel" string to the name of the last successfully
		 * added channel, except when the user issued an empty string
		 * to the channel parameter.
		 */
		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
		    && (strcmp("", strim(test_channel)) != 0)) {
			ret = -EINVAL;
			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
				sizeof(chan_reset_val));
			goto add_chan_err;
		}

	} else {
		/* Clear test_channel if no channels were added successfully */
		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
		ret = -EBUSY;
		goto add_chan_err;
	}

	mutex_unlock(&info->lock);

	return ret;

add_chan_err:
	param_set_copystring(chan_reset_val, kp);
	mutex_unlock(&info->lock);

	return ret;
}

static int dmatest_chan_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
		stop_threaded_test(info);
		strlcpy(test_channel, "", sizeof(test_channel));
	}
	mutex_unlock(&info->lock);

	return param_get_string(val, kp);
}

static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			thread_count++;
		}
		pr_info("%u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}

	return 0;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		add_threaded_test(info);
		run_pending_tests(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");