linux/arch/blackfin/kernel/bfin_dma.c
/*
 * bfin_dma.c - Blackfin DMA implementation
 *
 * Copyright 2004-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/early_printk.h>

/*
 * To work around anomaly 05000119, we always check the DMA_DONE bit and
 * never the DMA_RUN bit.
 */

struct dma_channel dma_ch[MAX_DMA_CHANNELS];
EXPORT_SYMBOL(dma_ch);

static int __init blackfin_dma_init(void)
{
        int i;

        printk(KERN_INFO "Blackfin DMA Controller\n");

#if ANOMALY_05000480
        bfin_write_DMAC_TC_PER(0x0111);
#endif

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                atomic_set(&dma_ch[i].chan_status, 0);
                dma_ch[i].regs = dma_io_base_addr[i];
        }
#if defined(CH_MEM_STREAM3_SRC) && defined(CONFIG_BF60x)
        /* Mark MEMDMA Channel 3 as requested since we're using it internally */
        request_dma(CH_MEM_STREAM3_DEST, "Blackfin dma_memcpy");
        request_dma(CH_MEM_STREAM3_SRC, "Blackfin dma_memcpy");
#else
        /* Mark MEMDMA Channel 0 as requested since we're using it internally */
        request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
        request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
#endif

#if defined(CONFIG_DEB_DMA_URGENT)
        bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
                         | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
#endif

        return 0;
}
arch_initcall(blackfin_dma_init);

#ifdef CONFIG_PROC_FS
static int proc_dma_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; ++i)
                if (dma_channel_active(i))
                        seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);

        return 0;
}

static int proc_dma_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_dma_show, NULL);
}

static const struct file_operations proc_dma_operations = {
        .open           = proc_dma_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_dma_init(void)
{
        proc_create("dma", 0, NULL, &proc_dma_operations);
        return 0;
}
late_initcall(proc_dma_init);
#endif
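
/*
 * Reading /proc/dma lists the channels currently claimed, along with the
 * device_id string passed to request_dma().  Illustrative output only
 * (channel numbers and names depend on the board and the drivers in use):
 *
 *      $ cat /proc/dma
 *      10: BFIN_UART
 *      12: Blackfin dma_memcpy
 *      13: Blackfin dma_memcpy
 */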

static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
{
#ifdef CONFIG_BF54x
        unsigned int per_map;

        switch (channel) {
                case CH_UART2_RX: per_map = 0xC << 12; break;
                case CH_UART2_TX: per_map = 0xD << 12; break;
                case CH_UART3_RX: per_map = 0xE << 12; break;
                case CH_UART3_TX: per_map = 0xF << 12; break;
                default:          return;
        }

        if (strncmp(device_id, "BFIN_UART", 9) == 0)
                dma_ch[channel].regs->peripheral_map = per_map;
#endif
}

/**
 *      request_dma - request a DMA channel
 *
 * Request the specific DMA channel from the system if it's available.
 */
int request_dma(unsigned int channel, const char *device_id)
{
        pr_debug("request_dma() : BEGIN\n");

        if (device_id == NULL)
                printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);

#if defined(CONFIG_BF561) && ANOMALY_05000182
        if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
                if (get_cclk() > 500000000) {
                        printk(KERN_WARNING
                               "Request IMDMA failed due to ANOMALY 05000182\n");
                        return -EFAULT;
                }
        }
#endif

        if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
                pr_debug("DMA CHANNEL IN USE\n");
                return -EBUSY;
        }

        set_dma_peripheral_map(channel, device_id);
        dma_ch[channel].device_id = device_id;
        dma_ch[channel].irq = 0;

        /* This enforces the restriction that a channel must be requested
         * via request_dma() before any operations are performed on its
         * descriptors or registers.
         */
        pr_debug("request_dma() : END\n");
        return 0;
}
EXPORT_SYMBOL(request_dma);

int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
{
        int ret;
        unsigned int irq;

        BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
                        !atomic_read(&dma_ch[channel].chan_status));

        irq = channel2irq(channel);
        ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
        if (ret)
                return ret;

        dma_ch[channel].irq = irq;
        dma_ch[channel].data = data;

        return 0;
}
EXPORT_SYMBOL(set_dma_callback);

/**
 *      clear_dma_buffer - clear DMA FIFOs for the specified channel
 *
 * Set the Buffer Clear bit in the Configuration register of the specified
 * DMA channel.  This stops any descriptor-based DMA operation.
 */
static void clear_dma_buffer(unsigned int channel)
{
        dma_ch[channel].regs->cfg |= RESTART;
        SSYNC();
        dma_ch[channel].regs->cfg &= ~RESTART;
}

void free_dma(unsigned int channel)
{
        pr_debug("freedma() : BEGIN\n");
        BUG_ON(channel >= MAX_DMA_CHANNELS ||
                        !atomic_read(&dma_ch[channel].chan_status));

        /* Halt the DMA */
        disable_dma(channel);
        clear_dma_buffer(channel);

        if (dma_ch[channel].irq)
                free_irq(dma_ch[channel].irq, dma_ch[channel].data);

        /* Clear the DMA Variable in the Channel */
        atomic_set(&dma_ch[channel].chan_status, 0);

        pr_debug("freedma() : END\n");
}
EXPORT_SYMBOL(free_dma);
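
/*
 * Illustrative driver-side use of the channel API above (a minimal sketch,
 * not from an in-tree driver; the CH_EXAMPLE channel macro, the handler,
 * and the device name are hypothetical):
 */
#if 0   /* example only */
static irqreturn_t example_dma_handler(int irq, void *data)
{
        /* Acknowledge the channel interrupt here (e.g. with the helpers
         * from <asm/dma.h>) and then queue the next buffer. */
        return IRQ_HANDLED;
}

static int example_dma_setup(void)
{
        int ret;

        ret = request_dma(CH_EXAMPLE, "example_driver");
        if (ret)
                return ret;

        ret = set_dma_callback(CH_EXAMPLE, example_dma_handler, NULL);
        if (ret) {
                free_dma(CH_EXAMPLE);
                return ret;
        }

        return 0;
}
#endif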

#ifdef CONFIG_PM
# ifndef MAX_DMA_SUSPEND_CHANNELS
#  define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
# endif
# ifndef CONFIG_BF60x
int blackfin_dma_suspend(void)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
                if (dma_ch[i].regs->cfg & DMAEN) {
                        printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
                        return -EBUSY;
                }
                if (i < MAX_DMA_SUSPEND_CHANNELS)
                        dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
        }

#if ANOMALY_05000480
        bfin_write_DMAC_TC_PER(0x0);
#endif
        return 0;
}

void blackfin_dma_resume(void)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
                dma_ch[i].regs->cfg = 0;
                if (i < MAX_DMA_SUSPEND_CHANNELS)
                        dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
        }
#if ANOMALY_05000480
        bfin_write_DMAC_TC_PER(0x0111);
#endif
}
# else
int blackfin_dma_suspend(void)
{
        return 0;
}

void blackfin_dma_resume(void)
{
}
# endif
#endif

/**
 *      blackfin_dma_early_init - minimal DMA init
 *
 * Set up a few DMA registers so we can safely do DMA transfers early on in
 * the kernel booting process.  Really this just means using dma_memcpy().
 */
void __init blackfin_dma_early_init(void)
{
        early_shadow_stamp();
        bfin_write_MDMA_S0_CONFIG(0);
        bfin_write_MDMA_S1_CONFIG(0);
}

void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
        unsigned long dst = (unsigned long)pdst;
        unsigned long src = (unsigned long)psrc;
        struct dma_register *dst_ch, *src_ch;

        early_shadow_stamp();

        /* We assume that everything is 4 byte aligned, so include
         * a basic sanity check
         */
        BUG_ON(dst % 4);
        BUG_ON(src % 4);
        BUG_ON(size % 4);

        src_ch = 0;
        /* Find an available memDMA channel */
        while (1) {
                if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
                        dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
                        src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
                } else {
                        dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
                        src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
                }

                if (!DMA_MMR_READ(&src_ch->cfg))
                        break;
                else if (DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE) {
                        DMA_MMR_WRITE(&src_ch->cfg, 0);
                        break;
                }
        }

        /* Force a sync in case a previous config reset on this channel
         * occurred.  This is needed so subsequent writes to DMA registers
         * are not spuriously lost/corrupted.
         */
        __builtin_bfin_ssync();

        /* Destination */
        bfin_write32(&dst_ch->start_addr, dst);
        DMA_MMR_WRITE(&dst_ch->x_count, size >> 2);
        DMA_MMR_WRITE(&dst_ch->x_modify, 1 << 2);
        DMA_MMR_WRITE(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

        /* Source */
        bfin_write32(&src_ch->start_addr, src);
        DMA_MMR_WRITE(&src_ch->x_count, size >> 2);
        DMA_MMR_WRITE(&src_ch->x_modify, 1 << 2);
        DMA_MMR_WRITE(&src_ch->irq_status, DMA_DONE | DMA_ERR);

        /* Enable */
        DMA_MMR_WRITE(&src_ch->cfg, DMAEN | WDSIZE_32);
        DMA_MMR_WRITE(&dst_ch->cfg, WNR | DI_EN_X | DMAEN | WDSIZE_32);

        /* Since we are atomic now, don't use the workaround ssync */
        __builtin_bfin_ssync();

#ifdef CONFIG_BF60x
        /* Work around a possible MDMA anomaly. Running 2 MDMA channels to
         * transfer DDR data to L1 SRAM may corrupt data.
         * Should be reverted after this issue is root caused.
         */
        while (!(DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE))
                continue;
#endif
}

void __init early_dma_memcpy_done(void)
{
        early_shadow_stamp();

        while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
               (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
                continue;

        bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
        bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
        /*
         * Now that DMA is done, we would normally flush cache, but
         * i/d cache isn't running this early, so we don't bother,
         * and just clear out the DMA channel for next time
         */
        bfin_write_MDMA_S0_CONFIG(0);
        bfin_write_MDMA_S1_CONFIG(0);
        bfin_write_MDMA_D0_CONFIG(0);
        bfin_write_MDMA_D1_CONFIG(0);

        __builtin_bfin_ssync();
}
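
/*
 * Illustrative early-boot usage (a sketch; the buffers and length are
 * hypothetical): queue one or more copies while the caches are still off,
 * then wait for the MDMA channels to drain before touching the data:
 *
 *      early_dma_memcpy(l1_dest, loader_src, 0x1000);
 *      early_dma_memcpy_done();
 */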

#if defined(CH_MEM_STREAM3_SRC) && defined(CONFIG_BF60x)
#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S3_CONFIG
#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S3_CONFIG
#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S3_START_ADDR
#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S3_IRQ_STATUS
#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S3_X_COUNT
#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S3_X_MODIFY
#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S3_Y_COUNT
#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S3_Y_MODIFY
#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D3_CONFIG
#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D3_START_ADDR
#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D3_IRQ_STATUS
#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D3_IRQ_STATUS
#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D3_X_COUNT
#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D3_X_MODIFY
#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D3_Y_COUNT
#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D3_Y_MODIFY
#else
#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S0_CONFIG
#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S0_CONFIG
#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S0_START_ADDR
#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S0_IRQ_STATUS
#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S0_X_COUNT
#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S0_X_MODIFY
#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S0_Y_COUNT
#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S0_Y_MODIFY
#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D0_CONFIG
#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D0_START_ADDR
#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D0_IRQ_STATUS
#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D0_IRQ_STATUS
#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D0_X_COUNT
#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D0_X_MODIFY
#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D0_Y_COUNT
#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D0_Y_MODIFY
#endif

/**
 *      __dma_memcpy - program the MDMA registers
 *
 * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
 * while programming registers so that everything is fully configured.  Wait
 * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
 * check will make sure we don't clobber any existing transfer.
 */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
        static DEFINE_SPINLOCK(mdma_lock);
        unsigned long flags;

        spin_lock_irqsave(&mdma_lock, flags);

        /* Force a sync in case a previous config reset on this channel
         * occurred.  This is needed so subsequent writes to DMA registers
         * are not spuriously lost/corrupted.  Do it under irq lock and
         * without the anomaly version (because we are atomic already).
         */
        __builtin_bfin_ssync();

        if (bfin_read_MDMA_S_CONFIG())
                while (!(bfin_read_MDMA_D_IRQ_STATUS() & DMA_DONE))
                        continue;

        if (conf & DMA2D) {
                /* For larger bit sizes, we've already divided down cnt so it
                 * is no longer a multiple of 64k.  So we have to break down
                 * the limit here so it is a multiple of the incoming size.
                 * There is no limitation here in terms of total size other
                 * than the hardware though as the bits lost in the shift are
                 * made up by MODIFY (== we can hit the whole address space).
                 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
                 */
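                /* Worked example (added for illustration, not from the
                 * original source): a 0x30000-byte copy at WDSIZE_16 arrives
                 * here as cnt = 0x18000 elements with dmod = +/-2, so
                 * shift = 1, the X count becomes 1 << 15 = 0x8000 elements
                 * per row, and ycnt = 0x18000 >> 15 = 3 rows, i.e.
                 * 3 * 0x8000 * 2 bytes = 0x30000 bytes in total.
                 */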
                u32 shift = abs(dmod) >> 1;
                size_t ycnt = cnt >> (16 - shift);
                cnt = 1 << (16 - shift);
                bfin_write_MDMA_D_Y_COUNT(ycnt);
                bfin_write_MDMA_S_Y_COUNT(ycnt);
                bfin_write_MDMA_D_Y_MODIFY(dmod);
                bfin_write_MDMA_S_Y_MODIFY(smod);
        }

        bfin_write_MDMA_D_START_ADDR(daddr);
        bfin_write_MDMA_D_X_COUNT(cnt);
        bfin_write_MDMA_D_X_MODIFY(dmod);
        bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);

        bfin_write_MDMA_S_START_ADDR(saddr);
        bfin_write_MDMA_S_X_COUNT(cnt);
        bfin_write_MDMA_S_X_MODIFY(smod);
        bfin_write_MDMA_S_IRQ_STATUS(DMA_DONE | DMA_ERR);

        bfin_write_MDMA_S_CONFIG(DMAEN | conf);
        if (conf & DMA2D)
                bfin_write_MDMA_D_CONFIG(WNR | DI_EN_Y | DMAEN | conf);
        else
                bfin_write_MDMA_D_CONFIG(WNR | DI_EN_X | DMAEN | conf);

        spin_unlock_irqrestore(&mdma_lock, flags);

        SSYNC();

        while (!(bfin_read_MDMA_D_IRQ_STATUS() & DMA_DONE))
                if (bfin_read_MDMA_S_CONFIG())
                        continue;
                else
                        return;

        bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);

        bfin_write_MDMA_S_CONFIG(0);
        bfin_write_MDMA_D_CONFIG(0);
}

/**
 *      _dma_memcpy - translate C memcpy settings into MDMA settings
 *
 * Handle all the high level steps before we touch the MDMA registers.  So
 * handle direction, tweaking of sizes, and formatting of addresses.
 */
static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
        u32 conf, shift;
        s16 mod;
        unsigned long dst = (unsigned long)pdst;
        unsigned long src = (unsigned long)psrc;

        if (size == 0)
                return NULL;

        if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
                conf = WDSIZE_32;
                shift = 2;
        } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
                conf = WDSIZE_16;
                shift = 1;
        } else {
                conf = WDSIZE_8;
                shift = 0;
        }

        /* If the two memory regions have a chance of overlapping, make
         * sure the memcpy still works as expected.  Do this by having the
         * copy run backwards instead.
         */
        mod = 1 << shift;
        if (src < dst) {
                mod *= -1;
                dst += size + mod;
                src += size + mod;
        }
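        /* Worked example (illustrative numbers): dst = 0x900, src = 0x800,
         * size = 0x400 with 32-bit words (shift = 2).  Since src < dst,
         * mod becomes -4 and both pointers are bumped to their last word
         * (dst = 0xcfc, src = 0xbfc); the DMA then walks downward through
         * 0x100 words, so the overlapping tail is read before it is
         * overwritten.
         */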
        size >>= shift;

#ifndef DMA_MMR_SIZE_32
        if (size > 0x10000)
                conf |= DMA2D;
#endif

        __dma_memcpy(dst, mod, src, mod, size, conf);

        return pdst;
}

/**
 *      dma_memcpy - DMA memcpy under mutex lock
 *
 * Do not check arguments before starting the DMA memcpy.  Break the transfer
 * up into two pieces.  The first transfer is in multiples of 64k and the
 * second transfer is the piece smaller than 64k.
 */
void *dma_memcpy(void *pdst, const void *psrc, size_t size)
{
        unsigned long dst = (unsigned long)pdst;
        unsigned long src = (unsigned long)psrc;

        if (bfin_addr_dcacheable(src))
                blackfin_dcache_flush_range(src, src + size);

        if (bfin_addr_dcacheable(dst))
                blackfin_dcache_invalidate_range(dst, dst + size);

        return dma_memcpy_nocache(pdst, psrc, size);
}
EXPORT_SYMBOL(dma_memcpy);
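
/*
 * Illustrative caller of dma_memcpy() (a sketch; l1_sram_buf and ddr_buf are
 * hypothetical, e.g. an L1 SRAM region owned by a driver and an ordinary
 * cacheable DDR buffer).  The wrapper above flushes the source and
 * invalidates the destination as needed, so the caller simply issues:
 *
 *      dma_memcpy(l1_sram_buf, ddr_buf, 512);
 */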

/**
 *      dma_memcpy_nocache - DMA memcpy under mutex lock
 *      - No cache flush/invalidate
 *
 * Do not check arguments before starting the DMA memcpy.  Break the transfer
 * up into two pieces.  The first transfer is in multiples of 64k and the
 * second transfer is the piece smaller than 64k.
 */
void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
{
#ifdef DMA_MMR_SIZE_32
        _dma_memcpy(pdst, psrc, size);
#else
        size_t bulk, rest;

        bulk = size & ~0xffff;
        rest = size - bulk;
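        /* e.g. size = 0x12345 bytes: bulk = 0x10000 and rest = 0x2345, so
         * the 64 KiB-multiple part and the remainder are issued as two
         * separate transfers below (example figures for illustration).
         */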
        if (bulk)
                _dma_memcpy(pdst, psrc, bulk);
        _dma_memcpy(pdst + bulk, psrc + bulk, rest);
#endif
        return pdst;
}
EXPORT_SYMBOL(dma_memcpy_nocache);

/**
 *      safe_dma_memcpy - DMA memcpy w/argument checking
 *
 * Verify arguments are safe before heading to dma_memcpy().
 */
void *safe_dma_memcpy(void *dst, const void *src, size_t size)
{
        if (!access_ok(VERIFY_WRITE, dst, size))
                return NULL;
        if (!access_ok(VERIFY_READ, src, size))
                return NULL;
        return dma_memcpy(dst, src, size);
}
EXPORT_SYMBOL(safe_dma_memcpy);

static void _dma_out(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
                     u16 size, u16 dma_size)
{
        blackfin_dcache_flush_range(buf, buf + len * size);
        __dma_memcpy(addr, 0, buf, size, len, dma_size);
}

static void _dma_in(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
                    u16 size, u16 dma_size)
{
        blackfin_dcache_invalidate_range(buf, buf + len * size);
        __dma_memcpy(buf, size, addr, 0, len, dma_size);
}

#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned DMA_MMR_SIZE_TYPE len) \
{ \
        _dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
} \
EXPORT_SYMBOL(dma_##io##s##bwl)
MAKE_DMA_IO(out, b, 1,  8, const);
MAKE_DMA_IO(in,  b, 1,  8, );
MAKE_DMA_IO(out, w, 2, 16, const);
MAKE_DMA_IO(in,  w, 2, 16, );
MAKE_DMA_IO(out, l, 4, 32, const);
MAKE_DMA_IO(in,  l, 4, 32, );
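
/*
 * For reference, MAKE_DMA_IO(out, w, 2, 16, const) above expands to roughly
 * the following (illustrative expansion, whitespace added):
 *
 *      void dma_outsw(unsigned long addr, const void *buf,
 *                     unsigned DMA_MMR_SIZE_TYPE len)
 *      {
 *              _dma_out(addr, (unsigned long)buf, len, 2, WDSIZE_16);
 *      }
 *      EXPORT_SYMBOL(dma_outsw);
 */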