linux/arch/powerpc/platforms/pasemi/dma_lib.c
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
        return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
        out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
        return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
        out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
        return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
        out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
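
/*
 * Example (illustrative, not part of the original file): these accessors are
 * thin wrappers around little-endian MMIO, so a read-modify-write of a DMA
 * register looks like the PAS_DMA_COM_CFG update performed later in
 * pasemi_dma_init():
 *
 *        u32 cfg;
 *
 *        cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *        cfg |= 0x18000000;
 *        pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg);
 */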

static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
        int bit;
        int start, limit;

        switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
        case TXCHAN_EVT0:
                start = 0;
                limit = 10;
                break;
        case TXCHAN_EVT1:
                start = 10;
                limit = MAX_TXCH;
                break;
        default:
                start = 0;
                limit = MAX_TXCH;
                break;
        }
retry:
        bit = find_next_bit(txch_free, MAX_TXCH, start);
        if (bit >= limit)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, txch_free))
                goto retry;

        return bit;
}

static void pasemi_free_tx_chan(int chan)
{
        BUG_ON(test_bit(chan, txch_free));
        set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
        int bit;
retry:
        bit = find_first_bit(rxch_free, MAX_RXCH);
        if (bit >= MAX_RXCH)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, rxch_free))
                goto retry;

        return bit;
}

static void pasemi_free_rx_chan(int chan)
{
        BUG_ON(test_bit(chan, rxch_free));
        set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *              room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *          of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *          not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel, and in the case
 * of TX channels which group it needs to belong to (if any).
 *
 * Returns a pointer to the struct pasemi_dmachan embedded in the allocated
 * structure on success (chan->priv points back at the start of the full
 * allocation), NULL on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
                            int total_size, int offset)
{
        void *buf;
        struct pasemi_dmachan *chan;
        int chno;

        BUG_ON(total_size < sizeof(struct pasemi_dmachan));

        buf = kzalloc(total_size, GFP_KERNEL);

        if (!buf)
                return NULL;
        chan = buf + offset;

        chan->priv = buf;

        switch (type & (TXCHAN|RXCHAN)) {
        case RXCHAN:
                chno = pasemi_alloc_rx_chan();
                if (chno < 0)
                        goto out_free;
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL,
                                               base_hw_irq + num_txch + chno);
                chan->status = &dma_status->rx_sta[chno];
                break;
        case TXCHAN:
                chno = pasemi_alloc_tx_chan(type);
                if (chno < 0)
                        goto out_free;
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
                chan->status = &dma_status->tx_sta[chno];
                break;
        }

        chan->chan_type = type;

        return chan;

out_free:
        kfree(buf);
        return NULL;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
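
/*
 * Example (illustrative sketch, not part of the original file): a client that
 * keeps its own state around the channel embeds struct pasemi_dmachan and
 * passes the embedding offset; chan->priv then points back at the start of
 * the whole allocation. The structure and variable names are hypothetical.
 *
 *        struct my_txring {
 *                spinlock_t lock;
 *                struct pasemi_dmachan chan;
 *        };
 *
 *        struct pasemi_dmachan *chan;
 *        struct my_txring *txring;
 *
 *        chan = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_txring),
 *                                     offsetof(struct my_txring, chan));
 *        if (!chan)
 *                return -ENOMEM;
 *        txring = chan->priv;
 */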

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
        if (chan->ring_virt)
                pasemi_dma_free_ring(chan);

        switch (chan->chan_type & (RXCHAN|TXCHAN)) {
        case RXCHAN:
                pasemi_free_rx_chan(chan->chno);
                break;
        case TXCHAN:
                pasemi_free_tx_chan(chan->chno);
                break;
        }

        kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
        BUG_ON(chan->ring_virt);

        chan->ring_size = ring_size;

        chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
                                             ring_size * sizeof(u64),
                                             &chan->ring_dma, GFP_KERNEL);

        if (!chan->ring_virt)
                return -ENOMEM;

        memset(chan->ring_virt, 0, ring_size * sizeof(u64));

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
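
/*
 * Example (illustrative, not part of the original file): sizing a descriptor
 * ring for a previously allocated channel. ring_size is in 64-bit words, so
 * the call below allocates room for 64 eight-byte descriptors; on success
 * chan->ring_virt holds the (zeroed) CPU mapping and chan->ring_dma the bus
 * address to program into the hardware. The error label is hypothetical.
 *
 *        int err;
 *
 *        err = pasemi_dma_alloc_ring(chan, 64);
 *        if (err)
 *                goto out_free_chan;
 */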

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
        BUG_ON(!chan->ring_virt);

        dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
                          chan->ring_virt, chan->ring_dma);
        chan->ring_virt = NULL;
        chan->ring_size = 0;
        chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
        if (chan->chan_type == RXCHAN)
                pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
        else
                pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function will only try for a short while for the channel to stop; if
 * it doesn't stop in time, it will return failure.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
        int reg, retries;
        u32 sta;

        if (chan->chan_type == RXCHAN) {
                reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        } else {
                reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        }

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
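
/*
 * Example (illustrative, not part of the original file): a typical shutdown
 * sequence built from the calls above. pasemi_dma_free_chan() also releases
 * the descriptor ring, so the ring does not need to be freed separately.
 *
 *        if (!pasemi_dma_stop_chan(chan))
 *                pr_warn("DMA channel %d did not stop\n", chan->chno);
 *        pasemi_dma_free_chan(chan);
 */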

/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
                           dma_addr_t *handle)
{
        return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
                         dma_addr_t *handle)
{
        dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);

/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
        int bit;

retry:
        bit = find_next_bit(flags_free, MAX_FLAGS, 0);
        if (bit >= MAX_FLAGS)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, flags_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);

/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        BUG_ON(test_bit(flag, flags_free));
        set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);


/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
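
/*
 * Example (illustrative, not part of the original file): flags are shared
 * event bits (0-63) used by event descriptors to synchronize channels. A
 * client claims one, clears it to a known state, references it from its
 * descriptors, and releases it when done:
 *
 *        int flag;
 *
 *        flag = pasemi_dma_alloc_flag();
 *        if (flag < 0)
 *                return flag;
 *        pasemi_dma_clear_flag(flag);
 *        ...
 *        pasemi_dma_free_flag(flag);
 */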

/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload.
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
        int bit;

retry:
        bit = find_next_bit(fun_free, MAX_FUN, 0);
        if (bit >= MAX_FUN)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, fun_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);


/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
        BUG_ON(fun >= MAX_FUN);
        BUG_ON(test_bit(fun, fun_free));
        set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);
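
/*
 * Example (illustrative, not part of the original file): function engines for
 * crypto/checksum offload are claimed and released the same way as flags:
 *
 *        int fun;
 *
 *        fun = pasemi_dma_alloc_fun();
 *        if (fun < 0)
 *                return fun;
 *        ...
 *        pasemi_dma_free_fun(fun);
 */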


static void __iomem *map_onedev(struct pci_dev *p, int index)
{
        struct device_node *dn;
        void __iomem *ret;

        dn = pci_device_to_OF_node(p);
        if (!dn)
                goto fallback;

        ret = of_iomap(dn, index);
        if (!ret)
                goto fallback;

        return ret;
fallback:
        /* This is hardcoded and ugly, but we have some firmware versions
         * that don't provide the register space in the device tree. Luckily
         * they are at well-known locations so we can just do the math here.
         */
        return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
        static DEFINE_SPINLOCK(init_lock);
        struct pci_dev *iob_pdev;
        struct pci_dev *pdev;
        struct resource res;
        struct device_node *dn;
        int i, intf, err = 0;
        unsigned long timeout;
        u32 tmp;

        if (!machine_is(pasemi))
                return -ENODEV;

        spin_lock(&init_lock);

        /* Make sure we haven't already initialized */
        if (dma_pdev)
                goto out;

        iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!iob_pdev) {
                BUG();
                printk(KERN_WARNING "Can't find I/O Bridge\n");
                err = -ENODEV;
                goto out;
        }
        iob_regs = map_onedev(iob_pdev, 0);

        dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!dma_pdev) {
                BUG();
                printk(KERN_WARNING "Can't find DMA controller\n");
                err = -ENODEV;
                goto out;
        }
        dma_regs = map_onedev(dma_pdev, 0);
        base_hw_irq = virq_to_hw(dma_pdev->irq);

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
        num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
        num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

        intf = 0;
        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        dn = pci_device_to_OF_node(iob_pdev);
        if (dn)
                err = of_address_to_resource(dn, 1, &res);
        if (!dn || err) {
                /* Fallback for old firmware */
                res.start = 0xfd800000;
                res.end = res.start + 0x1000;
                err = 0;
        }
        dma_status = __ioremap(res.start, resource_size(&res), 0);
        pci_dev_put(iob_pdev);

        for (i = 0; i < MAX_TXCH; i++)
                __set_bit(i, txch_free);

        for (i = 0; i < MAX_RXCH; i++)
                __set_bit(i, rxch_free);

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Warning: Could not disable RX section\n");
                        break;
                }
        }

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Warning: Could not disable TX section\n");
                        break;
                }
        }

        /* setup resource allocations for the different DMA sections */
        tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
        pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

        /* enable tx section */
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

        /* enable rx section */
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

        for (i = 0; i < MAX_FLAGS; i++)
                __set_bit(i, flags_free);

        for (i = 0; i < MAX_FUN; i++)
                __set_bit(i, fun_free);

        /* clear all status flags */
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

        printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
                "(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
        spin_unlock(&init_lock);
        return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
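
/*
 * Example (illustrative, not part of the original file): client drivers call
 * pasemi_dma_init() before using any other function in this library,
 * typically from their probe path, and propagate its error code. Repeated
 * calls are safe since the setup runs only once under init_lock. The probe
 * function below is hypothetical.
 *
 *        static int my_probe(struct pci_dev *pdev,
 *                            const struct pci_device_id *id)
 *        {
 *                int err;
 *
 *                err = pasemi_dma_init();
 *                if (err)
 *                        return err;
 *                ...
 *        }
 */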