linux/drivers/dma/dmatest.c
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
                "Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
                "Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
                "Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
                "Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
                 "Pass -1 for infinite timeout");

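/*
 * Example usage (illustrative only; the channel name and values below are
 * arbitrary, not defaults of this module):
 *
 *   modprobe dmatest channel=dma0chan0 threads_per_chan=4 iterations=100
 *
 * With no parameters, one thread per supported operation type is started
 * on every capable channel and runs until the module is removed.
 */
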
/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC             0x80
#define PATTERN_DST             0x00
#define PATTERN_COPY            0x40
#define PATTERN_OVERWRITE       0x20
#define PATTERN_COUNT_MASK      0x1f

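/*
 * Worked example (illustrative, assuming the masks above): a source byte
 * at buffer index i = 5 that falls inside the to-be-copied region is
 * initialized to PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK),
 * i.e. 0x80 | 0x40 | 0x1a = 0xda, while a destination byte at the same
 * index in the to-be-overwritten region starts out as
 * 0x00 | 0x20 | 0x1a = 0x3a.
 */
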
struct dmatest_thread {
        struct list_head        node;
        struct task_struct      *task;
        struct dma_chan         *chan;
        u8                      **srcs;
        u8                      **dsts;
        enum dma_transaction_type type;
};

struct dmatest_chan {
        struct list_head        node;
        struct dma_chan         *chan;
        struct list_head        threads;
};

/*
 * These are protected by dma_list_mutex since they're only used by
 * the DMA filter function callback
 */
static LIST_HEAD(dmatest_channels);
static unsigned int nr_channels;

static bool dmatest_match_channel(struct dma_chan *chan)
{
        if (test_channel[0] == '\0')
                return true;
        return strcmp(dma_chan_name(chan), test_channel) == 0;
}

static bool dmatest_match_device(struct dma_device *device)
{
        if (test_device[0] == '\0')
                return true;
        return strcmp(dev_name(device->dev), test_device) == 0;
}

static unsigned long dmatest_random(void)
{
        unsigned long buf;

        get_random_bytes(&buf, sizeof(buf));
        return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_SRC | PATTERN_COPY
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < test_buf_size; i++)
                        buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
                buf++;
        }
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
        unsigned int i;
        u8 *buf;

        for (; (buf = *bufs); bufs++) {
                for (i = 0; i < start; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
                for ( ; i < start + len; i++)
                        buf[i] = PATTERN_DST | PATTERN_OVERWRITE
                                | (~i & PATTERN_COUNT_MASK);
                for ( ; i < test_buf_size; i++)
                        buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
        }
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
                unsigned int counter, bool is_srcbuf)
{
        u8              diff = actual ^ pattern;
        u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
        const char      *thread_name = current->comm;

        if (is_srcbuf)
                pr_warning("%s: srcbuf[0x%x] overwritten!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if ((pattern & PATTERN_COPY)
                        && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
                pr_warning("%s: dstbuf[0x%x] not copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else if (diff & PATTERN_SRC)
                pr_warning("%s: dstbuf[0x%x] was copied!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
        else
                pr_warning("%s: dstbuf[0x%x] mismatch!"
                                " Expected %02x, got %02x\n",
                                thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
                unsigned int end, unsigned int counter, u8 pattern,
                bool is_srcbuf)
{
        unsigned int i;
        unsigned int error_count = 0;
        u8 actual;
        u8 expected;
        u8 *buf;
        unsigned int counter_orig = counter;

        for (; (buf = *bufs); bufs++) {
                counter = counter_orig;
                for (i = start; i < end; i++) {
                        actual = buf[i];
                        expected = pattern | (~counter & PATTERN_COUNT_MASK);
                        if (actual != expected) {
                                if (error_count < 32)
                                        dmatest_mismatch(actual, pattern, i,
                                                         counter, is_srcbuf);
                                error_count++;
                        }
                        counter++;
                }
        }

        if (error_count > 32)
                pr_warning("%s: %u errors suppressed\n",
                        current->comm, error_count - 32);

        return error_count;
}

static void dmatest_callback(void *completion)
{
        complete(completion);
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. The pattern differs depending on whether a byte
 * lies in an area which is supposed to be copied or overwritten, and it
 * differs between the source and destination buffers. So if the DMA
 * engine doesn't copy exactly what we tell it to copy, we'll notice.
 */
static int dmatest_func(void *data)
{
        struct dmatest_thread   *thread = data;
        struct dma_chan         *chan;
        const char              *thread_name;
        unsigned int            src_off, dst_off, len;
        unsigned int            error_count;
        unsigned int            failed_tests = 0;
        unsigned int            total_tests = 0;
        dma_cookie_t            cookie;
        enum dma_status         status;
        enum dma_ctrl_flags     flags;
        u8                      pq_coefs[pq_sources + 1];
        int                     ret;
        int                     src_cnt;
        int                     dst_cnt;
        int                     i;

        thread_name = current->comm;

        ret = -ENOMEM;

        smp_rmb();
        chan = thread->chan;
        if (thread->type == DMA_MEMCPY)
                src_cnt = dst_cnt = 1;
        else if (thread->type == DMA_XOR) {
                src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
                dst_cnt = 1;
        } else if (thread->type == DMA_PQ) {
                src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
                dst_cnt = 2;
                for (i = 0; i < src_cnt; i++)
                        pq_coefs[i] = 1;
        } else
                goto err_srcs;

        thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->srcs)
                goto err_srcs;
        for (i = 0; i < src_cnt; i++) {
                thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
                if (!thread->srcs[i])
                        goto err_srcbuf;
        }
        thread->srcs[i] = NULL;

        thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
        if (!thread->dsts)
                goto err_dsts;
        for (i = 0; i < dst_cnt; i++) {
                thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
                if (!thread->dsts[i])
                        goto err_dstbuf;
        }
        thread->dsts[i] = NULL;

        set_user_nice(current, 10);

        /*
         * src buffers are unmapped by the DMAEngine code with
         * dma_unmap_single(); dst buffers are unmapped by ourselves below.
         * DMA_PREP_INTERRUPT is what makes the completion callback set on
         * each descriptor fire.
         */
        flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
              | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

        while (!kthread_should_stop()
               && !(iterations && total_tests >= iterations)) {
                struct dma_device *dev = chan->device;
                struct dma_async_tx_descriptor *tx = NULL;
                dma_addr_t dma_srcs[src_cnt];
                dma_addr_t dma_dsts[dst_cnt];
                struct completion cmp;
                unsigned long tmo = msecs_to_jiffies(timeout);
                u8 align = 0;

                total_tests++;

                /* honor alignment restrictions */
                if (thread->type == DMA_MEMCPY)
                        align = dev->copy_align;
                else if (thread->type == DMA_XOR)
                        align = dev->xor_align;
                else if (thread->type == DMA_PQ)
                        align = dev->pq_align;

                if (1 << align > test_buf_size) {
                        pr_err("%u-byte buffer too small for %d-byte alignment\n",
                               test_buf_size, 1 << align);
                        break;
                }

                len = dmatest_random() % test_buf_size + 1;
                len = (len >> align) << align;
                if (!len)
                        len = 1 << align;
                src_off = dmatest_random() % (test_buf_size - len + 1);
                dst_off = dmatest_random() % (test_buf_size - len + 1);

                src_off = (src_off >> align) << align;
                dst_off = (dst_off >> align) << align;

                dmatest_init_srcs(thread->srcs, src_off, len);
                dmatest_init_dsts(thread->dsts, dst_off, len);

                for (i = 0; i < src_cnt; i++) {
                        u8 *buf = thread->srcs[i] + src_off;

                        dma_srcs[i] = dma_map_single(dev->dev, buf, len,
                                                     DMA_TO_DEVICE);
                }
                /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
                for (i = 0; i < dst_cnt; i++) {
                        dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
                                                     test_buf_size,
                                                     DMA_BIDIRECTIONAL);
                }

                if (thread->type == DMA_MEMCPY)
                        tx = dev->device_prep_dma_memcpy(chan,
                                                         dma_dsts[0] + dst_off,
                                                         dma_srcs[0], len,
                                                         flags);
                else if (thread->type == DMA_XOR)
                        tx = dev->device_prep_dma_xor(chan,
                                                      dma_dsts[0] + dst_off,
                                                      dma_srcs, src_cnt,
                                                      len, flags);
                else if (thread->type == DMA_PQ) {
                        dma_addr_t dma_pq[dst_cnt];

                        for (i = 0; i < dst_cnt; i++)
                                dma_pq[i] = dma_dsts[i] + dst_off;
                        tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
                                                     src_cnt, pq_coefs,
                                                     len, flags);
                }

                if (!tx) {
                        for (i = 0; i < src_cnt; i++)
                                dma_unmap_single(dev->dev, dma_srcs[i], len,
                                                 DMA_TO_DEVICE);
                        for (i = 0; i < dst_cnt; i++)
                                dma_unmap_single(dev->dev, dma_dsts[i],
                                                 test_buf_size,
                                                 DMA_BIDIRECTIONAL);
                        pr_warning("%s: #%u: prep error with src_off=0x%x "
                                        "dst_off=0x%x len=0x%x\n",
                                        thread_name, total_tests - 1,
                                        src_off, dst_off, len);
                        msleep(100);
                        failed_tests++;
                        continue;
                }

                init_completion(&cmp);
                tx->callback = dmatest_callback;
                tx->callback_param = &cmp;
                cookie = tx->tx_submit(tx);

                if (dma_submit_error(cookie)) {
                        pr_warning("%s: #%u: submit error %d with src_off=0x%x "
                                        "dst_off=0x%x len=0x%x\n",
                                        thread_name, total_tests - 1, cookie,
                                        src_off, dst_off, len);
                        msleep(100);
                        failed_tests++;
                        continue;
                }
                dma_async_issue_pending(chan);

                tmo = wait_for_completion_timeout(&cmp, tmo);
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

                if (tmo == 0) {
                        pr_warning("%s: #%u: test timed out\n",
                                   thread_name, total_tests - 1);
                        failed_tests++;
                        continue;
                } else if (status != DMA_SUCCESS) {
                        pr_warning("%s: #%u: got completion callback,"
                                   " but status is \'%s\'\n",
                                   thread_name, total_tests - 1,
                                   status == DMA_ERROR ? "error" : "in progress");
                        failed_tests++;
                        continue;
                }

                /* Unmap the dst buffers ourselves (see DMA_COMPL_SKIP_DEST_UNMAP above) */
                for (i = 0; i < dst_cnt; i++)
                        dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
                                         DMA_BIDIRECTIONAL);

                error_count = 0;

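                /*
                 * The checks below split each buffer into three regions:
                 * the bytes before the transfer window, the window itself,
                 * and the bytes after it. Only the window of the dst
                 * buffers may have changed, and it must now carry the
                 * source pattern; every other byte must still hold its
                 * initialization pattern.
                 */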
                pr_debug("%s: verifying source buffer...\n", thread_name);
                error_count += dmatest_verify(thread->srcs, 0, src_off,
                                0, PATTERN_SRC, true);
                error_count += dmatest_verify(thread->srcs, src_off,
                                src_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, true);
                error_count += dmatest_verify(thread->srcs, src_off + len,
                                test_buf_size, src_off + len,
                                PATTERN_SRC, true);

                pr_debug("%s: verifying dest buffer...\n",
                                thread->task->comm);
                error_count += dmatest_verify(thread->dsts, 0, dst_off,
                                0, PATTERN_DST, false);
                error_count += dmatest_verify(thread->dsts, dst_off,
                                dst_off + len, src_off,
                                PATTERN_SRC | PATTERN_COPY, false);
                error_count += dmatest_verify(thread->dsts, dst_off + len,
                                test_buf_size, dst_off + len,
                                PATTERN_DST, false);

                if (error_count) {
                        pr_warning("%s: #%u: %u errors with "
                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
                                thread_name, total_tests - 1, error_count,
                                src_off, dst_off, len);
                        failed_tests++;
                } else {
                        pr_debug("%s: #%u: No errors with "
                                "src_off=0x%x dst_off=0x%x len=0x%x\n",
                                thread_name, total_tests - 1,
                                src_off, dst_off, len);
                }
        }

        ret = 0;
        for (i = 0; thread->dsts[i]; i++)
                kfree(thread->dsts[i]);
err_dstbuf:
        kfree(thread->dsts);
err_dsts:
        for (i = 0; thread->srcs[i]; i++)
                kfree(thread->srcs[i]);
err_srcbuf:
        kfree(thread->srcs);
err_srcs:
        pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
                        thread_name, total_tests, failed_tests, ret);

        if (iterations > 0)
                while (!kthread_should_stop()) {
                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                        interruptible_sleep_on(&wait_dmatest_exit);
                }

        return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
        struct dmatest_thread   *thread;
        struct dmatest_thread   *_thread;
        int                     ret;

        list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
                ret = kthread_stop(thread->task);
                pr_debug("dmatest: thread %s exited with status %d\n",
                                thread->task->comm, ret);
                list_del(&thread->node);
                kfree(thread);
        }
        kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
        struct dmatest_thread *thread;
        struct dma_chan *chan = dtc->chan;
        char *op;
        unsigned int i;

        if (type == DMA_MEMCPY)
                op = "copy";
        else if (type == DMA_XOR)
                op = "xor";
        else if (type == DMA_PQ)
                op = "pq";
        else
                return -EINVAL;

        for (i = 0; i < threads_per_chan; i++) {
                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
                if (!thread) {
                        pr_warning("dmatest: No memory for %s-%s%u\n",
                                   dma_chan_name(chan), op, i);

                        break;
                }
                thread->chan = dtc->chan;
                thread->type = type;
                smp_wmb();
                thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
                if (IS_ERR(thread->task)) {
                        pr_warning("dmatest: Failed to run thread %s-%s%u\n",
                                        dma_chan_name(chan), op, i);
                        kfree(thread);
                        break;
                }

                /* srcbuf and dstbuf are allocated by the thread itself */

                list_add_tail(&thread->node, &dtc->threads);
        }

        return i;
}

static int dmatest_add_channel(struct dma_chan *chan)
{
        struct dmatest_chan     *dtc;
        struct dma_device       *dma_dev = chan->device;
        unsigned int            thread_count = 0;
        int cnt;

        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
                pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
                return -ENOMEM;
        }

        dtc->chan = chan;
        INIT_LIST_HEAD(&dtc->threads);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_XOR);
                thread_count += cnt > 0 ? cnt : 0;
        }
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_PQ);
                thread_count += cnt > 0 ? cnt : 0;
        }

        pr_info("dmatest: Started %u threads using %s\n",
                thread_count, dma_chan_name(chan));

        list_add_tail(&dtc->node, &dmatest_channels);
        nr_channels++;

        return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
        if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
                return false;
        else
                return true;
}

static int __init dmatest_init(void)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        int err = 0;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        for (;;) {
                chan = dma_request_channel(mask, filter, NULL);
                if (chan) {
                        err = dmatest_add_channel(chan);
                        if (err) {
                                dma_release_channel(chan);
                                break; /* add_channel failed, punt */
                        }
                } else
                        break; /* no more channels available */
                if (max_channels && nr_channels >= max_channels)
                        break; /* we have all we need */
        }

        return err;
}
/* when compiled in, wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
        struct dmatest_chan *dtc, *_dtc;
        struct dma_chan *chan;

        list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
                list_del(&dtc->node);
                chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dmatest: dropped channel %s\n",
                         dma_chan_name(chan));
                dma_release_channel(chan);
        }
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");