linux/drivers/dma/dmatest.c
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer timeout in msec (default: 3000), "
		 "pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable random data setup and verification");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable random data setup and verification
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
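
/*
 * Typical invocation, assuming a channel named dma0chan0 exists (the
 * parameter values below are illustrative only):
 *
 *	modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
 */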

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
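
/*
 * Worked example of the scheme above: the source byte at index 5 inside
 * the to-be-copied area is
 *	PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 *		= 0x80 | 0x40 | 0x1a = 0xda,
 * while the matching destination byte before the copy is
 *	PATTERN_DST | PATTERN_OVERWRITE | 0x1a = 0x3a.
 */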

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

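/* largest odd value not exceeding min(x, y); e.g. min_odd(4, 7) = 3 */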
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

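/*
 * Convert a count into a per-second rate. Worked example: with runtime
 * in usec, 1000 operations over 2 seconds (runtime = 2000000) yield
 * 1000000 * 1000 / 2000000 = 500 ops/sec.
 */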
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	do_div(per_sec, runtime);
	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return dmatest_persec(runtime, len >> 10);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	ktime_t			ktime;
	s64			runtime = 0;
	unsigned long long	total_len = 0;

	set_freezable();

	ret = -ENOMEM;

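	/* pairs with smp_wmb() in dmatest_add_threads() */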
	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
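	/*
	 * DMA_PREP_INTERRUPT requests a completion interrupt so that our
	 * callback runs; DMA_CTRL_ACK pre-acknowledges the descriptor so
	 * the driver may recycle it once the transfer completes.
	 */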
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t srcs[src_cnt];
		dma_addr_t *dsts;
		unsigned int src_off, dst_off, len;
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		if (params->noverify)
			len = params->buf_size;
		else
			len = dmatest_random() % params->buf_size + 1;

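		/* round len down to the required alignment; never allow zero */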
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;

		total_len += len;

		if (params->noverify) {
			src_off = 0;
			dst_off = 0;
		} else {
			src_off = dmatest_random() % (params->buf_size - len + 1);
			dst_off = dmatest_random() % (params->buf_size - len + 1);

			src_off = (src_off >> align) << align;
			dst_off = (dst_off >> align) << align;

			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size);
		}

		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src_off, dst_off, len, ret);
			continue;
		}

		um->len = params->buf_size;
		for (i = 0; i < src_cnt; i++) {
			void *buf = thread->srcs[i];
			struct page *pg = virt_to_page(buf);
			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src_off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				/* a bare "continue" would only skip the inner loop */
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				goto error_unmap_continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src_cnt];
		for (i = 0; i < dst_cnt; i++) {
			void *buf = thread->dsts[i];
			struct page *pg = virt_to_page(buf);
			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				goto error_unmap_continue;
			}
			um->bidi_cnt++;
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst_off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst_off,
						      srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			dmaengine_unmap_put(um);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			dmaengine_unmap_put(um);
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * a dangling pointer to done_wait.  To make this
			 * correct, we'd need to allocate a dmatest_done
			 * for each test iteration and perform "who's gonna
			 * free it this time?" dancing.  For now, just
			 * leave it dangling.
			 */
			dmaengine_unmap_put(um);
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_COMPLETE) {
			dmaengine_unmap_put(um);
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
			continue;
		}

		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src_off,
				       dst_off, len, 0);
		}

		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
		continue;
	}
	runtime = ktime_us_delta(ktime_get(), ktime);

	ret = 0;
err_dstbuf:
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
	kfree(thread->dsts);
err_dsts:
err_srcbuf:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		dmatest_persec(runtime, total_tests),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
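		/* publish the thread fields above before the thread runs */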
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		wake_up_process(thread->task);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");