linux/arch/mips/alchemy/common/dbdma.c
/*
 *
 * BRIEF MODULE DESCRIPTION
 *      The Descriptor Based DMA channel manager that first appeared
 *      on the Au1550.  I started with dma.c, but I think all that is
 *      left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *      dan@embeddededge.com
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
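
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): set up a memory-to-FIFO channel, queue one buffer and kick
 * the engine.  The UART target, ring size, callback and buffer below
 * are illustrative only.
 */
#if 0
static void example_tx_done(int irq, void *param)
{
	/* Completion callback, invoked from dbdma_interrupt(). */
}

static int example_start_tx(dma_addr_t buf, int nbytes)
{
	u32 chanid;

	/* Memory (DSCR_CMD0_ALWAYS) as source, UART0 TX FIFO as dest. */
	chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
					 AU1550_DSCR_CMD0_UART0_TX,
					 example_tx_done, NULL);
	if (!chanid)
		return -ENODEV;

	au1xxx_dbdma_set_devwidth(chanid, 8);		/* 8-bit UART FIFO */
	if (!au1xxx_dbdma_ring_alloc(chanid, 8))	/* 8 descriptors */
		return -ENOMEM;

	au1xxx_dbdma_put_source(chanid, buf, nbytes, DDMA_FLAGS_IE);
	au1xxx_dbdma_start(chanid);
	return 0;
}
#endif
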
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);

/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a)        ((((u32)(x)) + (a-1)) & ~(a-1))

static dbdma_global_t *dbdma_gptr =
                        (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
static int dbdma_initialized;

static dbdev_tab_t *dbdev_tab;

static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
        /* UARTS */
        { AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
        { AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
        { AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
        { AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8, 0x11400000, 0, 0 },

        /* EXT DMA */
        { AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
        { AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
        { AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
        { AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

        /* USB DEV */
        { AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN,  4, 8, 0x10200000, 0, 0 },
        { AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
        { AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
        { AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
        { AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN,  4, 8, 0x10200010, 0, 0 },
        { AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN,  4, 8, 0x10200014, 0, 0 },

        /* PSCs */
        { AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,  0, 0, 0x11a0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,  0, 0, 0x11b0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,  0, 0, 0x10a0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
        { AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,  0, 0, 0x10b0001c, 0, 0 },

        { AU1550_DSCR_CMD0_PCI_WRITE,  0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
        { AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */

        /* MAC 0 */
        { AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
        { AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

        /* MAC 1 */
        { AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
        { AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

        { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
        { AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
        { AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
        { AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
        { AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8, 0x11200000, 0, 0 },

        { AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
        { AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

        { AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

        { AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
        { AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8, 0x10600004, 0, 0 },
        { AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
        { AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  4, 8, 0x10680004, 0, 0 },

        { AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN,  4, 32, 0x10300008, 0, 0 },
        { AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

        { AU1200_DSCR_CMD0_PSC0_TX,   DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
        { AU1200_DSCR_CMD0_PSC0_RX,   DEV_FLAGS_IN,  0, 16, 0x11a0001c, 0, 0 },
        { AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { AU1200_DSCR_CMD0_PSC1_TX,   DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
        { AU1200_DSCR_CMD0_PSC1_RX,   DEV_FLAGS_IN,  0, 16, 0x11b0001c, 0, 0 },
        { AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

        { AU1200_DSCR_CMD0_CIM_RXA,  DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
        { AU1200_DSCR_CMD0_CIM_RXB,  DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
        { AU1200_DSCR_CMD0_CIM_RXC,  DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
        { AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

        { AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

        { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
        { AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8,  0x10100004, 0, 0 },
        { AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8,  0x10100000, 0, 0 },
        { AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8,  0x10101004, 0, 0 },
        { AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8,  0x10101000, 0, 0 },
        { AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8,  0x10102004, 0, 0 },
        { AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN,  0, 8,  0x10102000, 0, 0 },
        { AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8,  0x10103004, 0, 0 },
        { AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8,  0x10103000, 0, 0 },

        { AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8,  0x10600000, 0, 0 },
        { AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8,  0x10600004, 0, 0 },
        { AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8,  0x10601000, 0, 0 },
        { AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  8, 8,  0x10601004, 0, 0 },

        { AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN,    4, 32, 0x10300008, 0, 0 },
        { AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT,   4, 32, 0x10300004, 0, 0 },

        { AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0001c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,   0, 16, 0x10a0001c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0101c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,   0, 16, 0x10a0101c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0201c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,   0, 16, 0x10a0201c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT,  0, 16, 0x10a0301c, 0, 0 },
        { AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,   0, 16, 0x10a0301c, 0, 0 },

        { AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE,   0, 0,  0x00000000, 0, 0 },
        { AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

        { AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8,  0x10602000, 0, 0 },
        { AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN,  4, 8,  0x10602004, 0, 0 },

        { AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

        { AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE,  0, 32, 0x14001810, 0, 0 },

        { AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
        { AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

        { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
        { DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

/* 32 predefined plus 32 custom */
#define DBDEV_TAB_SIZE          64

static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];

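/* Look up a device table entry by ID; returns NULL if the ID is unknown. */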
static dbdev_tab_t *find_dbdev_id(u32 id)
{
        int i;
        dbdev_tab_t *p;

        for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
                p = &dbdev_tab[i];
                if (p->dev_id == id)
                        return p;
        }
        return NULL;
}

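/* Return the virtual address of the descriptor following 'dp' in the ring. */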
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
        return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);

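/*
 * Register a custom device in a free table slot (dev_id == ~0) and
 * return its new custom device ID, or 0 if the table is full.
 */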
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
        u32 ret = 0;
        dbdev_tab_t *p;
        static u16 new_id = 0x1000;

        p = find_dbdev_id(~0);
        if (p != NULL) {
                memcpy(p, dev, sizeof(dbdev_tab_t));
                p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
                ret = p->dev_id;
                new_id++;
#if 0
                printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
                                  p->dev_id, p->dev_flags, p->dev_physaddr);
#endif
        }

        return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);

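/* Remove a previously added custom device and free its table slot. */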
void au1xxx_ddma_del_device(u32 devid)
{
        dbdev_tab_t *p = find_dbdev_id(devid);

        if (p != NULL) {
                memset(p, 0, sizeof(dbdev_tab_t));
                p->dev_id = ~0;
        }
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);

/* Allocate a channel and return a non-zero channel handle if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
        unsigned long   flags;
        u32             used, chan;
        u32             dcp;
        int             i;
        dbdev_tab_t     *stp, *dtp;
        chan_tab_t      *ctp;
        au1x_dma_chan_t *cp;

        /*
         * No channels can be handed out until dbdma_setup() has run;
         * it has to wait for the interrupt handler registration, which
         * can't be done successfully during board set up.
         */
        if (!dbdma_initialized)
                return 0;

        stp = find_dbdev_id(srcid);
        if (stp == NULL)
                return 0;
        dtp = find_dbdev_id(destid);
        if (dtp == NULL)
                return 0;

        used = 0;

        /* Check to see if we can get both devices. */
        spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
        if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
             (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
                /* Got source */
                stp->dev_flags |= DEV_FLAGS_INUSE;
                if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
                     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
                        /* Got destination */
                        dtp->dev_flags |= DEV_FLAGS_INUSE;
                } else {
                        /* Can't get dest.  Release src. */
                        stp->dev_flags &= ~DEV_FLAGS_INUSE;
                        used++;
                }
        } else
                used++;
        spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

        if (used)
                return 0;

        /* Let's see if we can allocate a channel for it. */
        ctp = NULL;
        chan = 0;
        spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
        for (i = 0; i < NUM_DBDMA_CHANS; i++)
                if (chan_tab_ptr[i] == NULL) {
                        /*
                         * If kmalloc fails, it is caught below same
                         * as a channel not available.
                         */
                        ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
                        chan_tab_ptr[i] = ctp;
                        break;
                }
        spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

        if (ctp != NULL) {
                memset(ctp, 0, sizeof(chan_tab_t));
                ctp->chan_index = chan = i;
                dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
                dcp += (0x0100 * chan);
                ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
                cp = (au1x_dma_chan_t *)dcp;
                ctp->chan_src = stp;
                ctp->chan_dest = dtp;
                ctp->chan_callback = callback;
                ctp->chan_callparam = callparam;

                /* Initialize channel configuration. */
                i = 0;
                if (stp->dev_intlevel)
                        i |= DDMA_CFG_SED;
                if (stp->dev_intpolarity)
                        i |= DDMA_CFG_SP;
                if (dtp->dev_intlevel)
                        i |= DDMA_CFG_DED;
                if (dtp->dev_intpolarity)
                        i |= DDMA_CFG_DP;
                if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
                        (dtp->dev_flags & DEV_FLAGS_SYNC))
                                i |= DDMA_CFG_SYNC;
                cp->ddma_cfg = i;
                wmb(); /* drain writebuffer */

                /*
                 * Return a non-zero value that can be used to find the channel
                 * information in subsequent operations.
                 */
                return (u32)(&chan_tab_ptr[chan]);
        }

        /* Release devices */
        stp->dev_flags &= ~DEV_FLAGS_INUSE;
        dtp->dev_flags &= ~DEV_FLAGS_INUSE;

        return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);

/*
 * Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.  Returns the previous width
 * (0 if neither device is a FIFO).
 */
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
        u32             rv;
        chan_tab_t      *ctp;
        dbdev_tab_t     *stp, *dtp;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;
        rv = 0;

        if (stp->dev_flags & DEV_FLAGS_IN) {    /* Source in fifo */
                rv = stp->dev_devwidth;
                stp->dev_devwidth = bits;
        }
        if (dtp->dev_flags & DEV_FLAGS_OUT) {   /* Destination out fifo */
                rv = dtp->dev_devwidth;
                dtp->dev_devwidth = bits;
        }

        return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);

/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
        int                     i;
        u32                     desc_base, srcid, destid;
        u32                     cmd0, cmd1, src1, dest1;
        u32                     src0, dest0;
        chan_tab_t              *ctp;
        dbdev_tab_t             *stp, *dtp;
        au1x_ddma_desc_t        *dp;

        /*
         * I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;

        /*
         * The descriptors must be 32-byte aligned.  There is a
         * possibility the allocation will give us such an address,
         * and if we try that first we avoid wasting larger
         * slabs of memory.
         */
        desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
                                 GFP_KERNEL|GFP_DMA);
        if (desc_base == 0)
                return 0;

        if (desc_base & 0x1f) {
                /*
                 * No luck: allocate again with extra room and round
                 * the base address up to a descriptor boundary.
                 */
                kfree((const void *)desc_base);
                i = entries * sizeof(au1x_ddma_desc_t);
                i += (sizeof(au1x_ddma_desc_t) - 1);
                desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
                if (desc_base == 0)
                        return 0;

                ctp->cdb_membase = desc_base;
                desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
        } else
                ctp->cdb_membase = desc_base;

        dp = (au1x_ddma_desc_t *)desc_base;

        /* Keep track of the base descriptor. */
        ctp->chan_desc_base = dp;

        /* Initialize the rings with as much information as we know. */
        srcid = stp->dev_id;
        destid = dtp->dev_id;

        cmd0 = cmd1 = src1 = dest1 = 0;
        src0 = dest0 = 0;

        cmd0 |= DSCR_CMD0_SID(srcid);
        cmd0 |= DSCR_CMD0_DID(destid);
        cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
        cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

        /* Is it a memory-to-memory transfer? */
        if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
             (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
            ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
             (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
                cmd0 |= DSCR_CMD0_MEM;

        switch (stp->dev_devwidth) {
        case 8:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
                break;
        case 16:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
                break;
        case 32:
        default:
                cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
                break;
        }

        switch (dtp->dev_devwidth) {
        case 8:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
                break;
        case 16:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
                break;
        case 32:
        default:
                cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
                break;
        }

        /*
         * If the device is marked as an in/out FIFO, ensure it is
         * set non-coherent.
         */
        if (stp->dev_flags & DEV_FLAGS_IN)
                cmd0 |= DSCR_CMD0_SN;           /* Source in FIFO */
        if (dtp->dev_flags & DEV_FLAGS_OUT)
                cmd0 |= DSCR_CMD0_DN;           /* Destination out FIFO */

        /*
         * Set up source1.  For now, assume no stride and increment.
         * A channel attribute update can change this later.
         */
        switch (stp->dev_tsize) {
        case 1:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
                break;
        case 2:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
                break;
        case 4:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
                break;
        case 8:
        default:
                src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
                break;
        }

        /* If source input is FIFO, set static address. */
        if (stp->dev_flags & DEV_FLAGS_IN) {
                if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
                        src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
                else
                        src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
        }

        if (stp->dev_physaddr)
                src0 = stp->dev_physaddr;

        /*
         * Set up dest1.  For now, assume no stride and increment.
         * A channel attribute update can change this later.
         */
        switch (dtp->dev_tsize) {
        case 1:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
                break;
        case 2:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
                break;
        case 4:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
                break;
        case 8:
        default:
                dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
                break;
        }

        /* If destination output is FIFO, set static address. */
        if (dtp->dev_flags & DEV_FLAGS_OUT) {
                if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
                        dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
                else
                        dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
        }

        if (dtp->dev_physaddr)
                dest0 = dtp->dev_physaddr;

#if 0
        printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
                          "source1:%x dest0:%x dest1:%x\n",
                          dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
                          src1, dest0, dest1);
#endif
        for (i = 0; i < entries; i++) {
                dp->dscr_cmd0 = cmd0;
                dp->dscr_cmd1 = cmd1;
                dp->dscr_source0 = src0;
                dp->dscr_source1 = src1;
                dp->dscr_dest0 = dest0;
                dp->dscr_dest1 = dest1;
                dp->dscr_stat = 0;
                dp->sw_context = 0;
                dp->sw_status = 0;
                dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
                dp++;
        }

        /* Make the last descriptor point to the first. */
        dp--;
        dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
        ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

        return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);

/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        /*
         * I guess we could check this to be within the
         * range of the table......
         */
        ctp = *(chan_tab_t **)chanid;

        /*
         * We shouldn't have multiple callers for a particular channel,
         * and an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->put_ptr;

        /*
         * If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Load up buffer address and byte count. */
        dp->dscr_source0 = buf & ~0UL;
        dp->dscr_cmd1 = nbytes;
        /* Check flags */
        if (flags & DDMA_FLAGS_IE)
                dp->dscr_cmd0 |= DSCR_CMD0_IE;
        if (flags & DDMA_FLAGS_NOIE)
                dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

        /*
         * There is an erratum on the Au1200/Au1550 parts that could result
         * in "stale" data being DMA'ed. It has to do with the snoop logic on
         * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
         * these parts. If it is fixed in the future, these cache flushes
         * will be nothing more than empty macros. See io.h.
         */
        dma_cache_wback_inv((unsigned long)buf, nbytes);
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
        wmb(); /* drain writebuffer */
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;

        /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something non-zero. */
        return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);

/*
 * Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        /*
         * I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /*
         * We shouldn't have multiple callers for a particular channel,
         * and an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->put_ptr;

        /*
         * If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Check flags */
        if (flags & DDMA_FLAGS_IE)
                dp->dscr_cmd0 |= DSCR_CMD0_IE;
        if (flags & DDMA_FLAGS_NOIE)
                dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

        /* Load up buffer address and byte count. */
        dp->dscr_dest0 = buf & ~0UL;
        dp->dscr_cmd1 = nbytes;
#if 0
        printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
                          dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
                          dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
        /*
         * There is an erratum on the Au1200/Au1550 parts that could result in
         * "stale" data being DMA'ed. It has to do with the snoop logic on the
         * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
         * parts. If it is fixed in the future, these cache invalidates will
         * just be nothing more than empty macros. See io.h.
         */
        dma_cache_inv((unsigned long)buf, nbytes);
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
        wmb(); /* drain writebuffer */
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;

        /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something non-zero. */
        return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);

/*
 * Get a destination buffer from the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;
        u32                     rv;

        /*
         * I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /*
         * We shouldn't have multiple callers for a particular channel,
         * and an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->get_ptr;

        /*
         * If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Return buffer address and byte count. */
        *buf = (void *)(phys_to_virt(dp->dscr_dest0));
        *nbytes = dp->dscr_cmd1;
        rv = dp->dscr_stat;

        /* Get next descriptor pointer. */
        ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something non-zero. */
        return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);

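/*
 * Disable a channel, busy-wait briefly for the controller to report it
 * has halted, then clear the stale doorbell and current-descriptor-valid
 * status bits.
 */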
void au1xxx_dbdma_stop(u32 chanid)
{
        chan_tab_t      *ctp;
        au1x_dma_chan_t *cp;
        int halt_timeout = 0;

        ctp = *((chan_tab_t **)chanid);

        cp = ctp->chan_ptr;
        cp->ddma_cfg &= ~DDMA_CFG_EN;   /* Disable channel */
        wmb(); /* drain writebuffer */
        while (!(cp->ddma_stat & DDMA_STAT_H)) {
                udelay(1);
                halt_timeout++;
                if (halt_timeout > 100) {
                        printk(KERN_WARNING "dbdma: channel won't halt\n");
                        break;
                }
        }
        /* clear current desc valid and doorbell */
        cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
        wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);

/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
        chan_tab_t      *ctp;
        au1x_dma_chan_t *cp;

        ctp = *((chan_tab_t **)chanid);
        cp = ctp->chan_ptr;
        cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
        cp->ddma_cfg |= DDMA_CFG_EN;    /* Enable channel */
        wmb(); /* drain writebuffer */
        cp->ddma_dbell = 0;
        wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_start);

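/*
 * Stop the channel and rewind the ring: point all ring pointers back at
 * the base descriptor and clear every descriptor's valid bit.
 */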
void au1xxx_dbdma_reset(u32 chanid)
{
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;

        au1xxx_dbdma_stop(chanid);

        ctp = *((chan_tab_t **)chanid);
        ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

        /* Run through the descriptors and reset the valid indicator. */
        dp = ctp->chan_desc_base;

        do {
                dp->dscr_cmd0 &= ~DSCR_CMD0_V;
                /*
                 * Reset our software status -- this is used to determine
                 * if a descriptor is in use by upper level software,
                 * since posting can clear the 'V' bit.
                 */
                dp->sw_status = 0;
                dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        } while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);

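/*
 * Return the number of bytes left to transfer in the current descriptor.
 * The count is only meaningful while the channel is stopped.
 */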
u32 au1xxx_get_dma_residue(u32 chanid)
{
        chan_tab_t      *ctp;
        au1x_dma_chan_t *cp;
        u32             rv;

        ctp = *((chan_tab_t **)chanid);
        cp = ctp->chan_ptr;

        /* This is only valid if the channel is stopped. */
        rv = cp->ddma_bytecnt;
        wmb(); /* drain writebuffer */

        return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);

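/*
 * Stop the channel, free its descriptor ring and channel table entry,
 * and release the source and destination devices.
 */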
void au1xxx_dbdma_chan_free(u32 chanid)
{
        chan_tab_t      *ctp;
        dbdev_tab_t     *stp, *dtp;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;

        au1xxx_dbdma_stop(chanid);

        kfree((void *)ctp->cdb_membase);

        stp->dev_flags &= ~DEV_FLAGS_INUSE;
        dtp->dev_flags &= ~DEV_FLAGS_INUSE;
        chan_tab_ptr[ctp->chan_index] = NULL;

        kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);

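/*
 * All channels share one interrupt line.  Service the lowest-numbered
 * pending channel: acknowledge its interrupt, run the driver callback,
 * and advance the channel's current-descriptor pointer.
 */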
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
        u32                     intstat;
        u32                     chan_index;
        chan_tab_t              *ctp;
        au1x_ddma_desc_t        *dp;
        au1x_dma_chan_t         *cp;

        intstat = dbdma_gptr->ddma_intstat;
        wmb(); /* drain writebuffer */
        chan_index = __ffs(intstat);

        ctp = chan_tab_ptr[chan_index];
        cp = ctp->chan_ptr;
        dp = ctp->cur_ptr;

        /* Reset interrupt. */
        cp->ddma_irq = 0;
        wmb(); /* drain writebuffer */

        if (ctp->chan_callback)
                ctp->chan_callback(irq, ctp->chan_callparam);

        ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        return IRQ_HANDLED;
}

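/* Debugging aid: dump the channel registers and walk the descriptor ring. */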
void au1xxx_dbdma_dump(u32 chanid)
{
        chan_tab_t       *ctp;
        au1x_ddma_desc_t *dp;
        dbdev_tab_t      *stp, *dtp;
        au1x_dma_chan_t  *cp;
        u32 i            = 0;

        ctp = *((chan_tab_t **)chanid);
        stp = ctp->chan_src;
        dtp = ctp->chan_dest;
        cp = ctp->chan_ptr;

        printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
                          (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
                          dtp - dbdev_tab);
        printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
                          (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
                          (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

        printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
        printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
                          cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
        printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
                          cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
                          cp->ddma_bytecnt);

        /* Run through the descriptors */
        dp = ctp->chan_desc_base;

        do {
                printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
                                  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
                printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
                                  dp->dscr_source0, dp->dscr_source1,
                                  dp->dscr_dest0, dp->dscr_dest1);
                printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
                                  dp->dscr_stat, dp->dscr_nxtptr);
                dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
        } while (dp != ctp->chan_desc_base);
}

/*
 * Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
        chan_tab_t *ctp;
        au1x_ddma_desc_t *dp;
        u32 nbytes = 0;

        /*
         * I guess we could check this to be within the
         * range of the table......
         */
        ctp = *((chan_tab_t **)chanid);

        /*
         * We shouldn't have multiple callers for a particular channel,
         * and an interrupt doesn't affect this pointer nor the descriptor,
         * so no locking should be needed.
         */
        dp = ctp->put_ptr;

        /*
         * If the descriptor is valid, we are way ahead of the DMA
         * engine, so just return an error condition.
         */
        if (dp->dscr_cmd0 & DSCR_CMD0_V)
                return 0;

        /* Load up buffer addresses and byte count. */
        dp->dscr_dest0 = dscr->dscr_dest0;
        dp->dscr_source0 = dscr->dscr_source0;
        dp->dscr_dest1 = dscr->dscr_dest1;
        dp->dscr_source1 = dscr->dscr_source1;
        dp->dscr_cmd1 = dscr->dscr_cmd1;
        nbytes = dscr->dscr_cmd1;
        /* Allow the caller to specify whether an interrupt is generated. */
        dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
        dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
        ctp->chan_ptr->ddma_dbell = 0;

        /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

        /* Return something non-zero. */
        return nbytes;
}

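/*
 * Register save area for the syscore suspend/resume hooks: row 0 holds
 * the four global DBDMA registers, rows 1..NUM_DBDMA_CHANS hold six
 * registers for each channel.
 */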
static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];

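/*
 * Syscore suspend: save the global configuration, save and halt every
 * channel, and mask all channel interrupts.
 */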
static int alchemy_dbdma_suspend(void)
{
        int i;
        void __iomem *addr;

        addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
        alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
        alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
        alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
        alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);

        /* save channel configurations */
        addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
        for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
                alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
                alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
                alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
                alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
                alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
                alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);

                /* halt channel */
                __raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
                wmb();
                while (!(__raw_readl(addr + 0x14) & 1))
                        wmb();

                addr += 0x100;  /* next channel base */
        }
        /* disable channel interrupts */
        addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
        __raw_writel(0, addr + 0x0c);
        wmb();

        return 0;
}

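/* Syscore resume: restore the global registers, then each channel's state. */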
static void alchemy_dbdma_resume(void)
{
        int i;
        void __iomem *addr;

        addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
        __raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
        __raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
        __raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
        __raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);

        /* restore channel configurations */
        addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
        for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
                __raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
                __raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
                __raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
                __raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
                __raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
                __raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
                wmb();
                addr += 0x100;  /* next channel base */
        }
}

static struct syscore_ops alchemy_dbdma_syscore_ops = {
        .suspend        = alchemy_dbdma_suspend,
        .resume         = alchemy_dbdma_resume,
};

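/*
 * One-time controller setup: build the runtime device table from the
 * per-CPU template, reset the global controller state, and install the
 * shared interrupt handler.
 */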
static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable,
                              unsigned int nr_entries)
{
        int ret;

        dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
        if (!dbdev_tab)
                return -ENOMEM;

        /*
         * Copy only as many entries as the template actually has (a
         * fixed count of 32 would read past the end of the shorter
         * tables), then mark the remaining slots as free.
         */
        memcpy(dbdev_tab, idtable, nr_entries * sizeof(dbdev_tab_t));
        for (ret = nr_entries; ret < DBDEV_TAB_SIZE; ret++)
                dbdev_tab[ret].dev_id = ~0;

        dbdma_gptr->ddma_config = 0;
        dbdma_gptr->ddma_throttle = 0;
        dbdma_gptr->ddma_inten = 0xffff;
        wmb(); /* drain writebuffer */

        ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
        if (ret) {
                printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
        } else {
                dbdma_initialized = 1;
                register_syscore_ops(&alchemy_dbdma_syscore_ops);
        }

        return ret;
}

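/* Probe the CPU type and register the matching device table. */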
static int __init alchemy_dbdma_init(void)
{
        switch (alchemy_get_cputype()) {
        case ALCHEMY_CPU_AU1550:
                return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab,
                                   ARRAY_SIZE(au1550_dbdev_tab));
        case ALCHEMY_CPU_AU1200:
                return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab,
                                   ARRAY_SIZE(au1200_dbdev_tab));
        case ALCHEMY_CPU_AU1300:
                return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab,
                                   ARRAY_SIZE(au1300_dbdev_tab));
        }
        return 0;
}
subsys_initcall(alchemy_dbdma_init);