linux/arch/arm/mach-bcmring/dma.c
/*****************************************************************************
* Copyright 2004 - 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*   @file   dma.c
*
*   @brief  Implements the DMA interface.
*/
/****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/proc_fs.h>

#include <mach/timer.h>

#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/atomic.h>
#include <mach/dma.h>

/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */

#define ALLOW_MAP_OF_KMALLOC_MEMORY 0

/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

#define MAKE_HANDLE(controllerIdx, channelIdx)    (((controllerIdx) << 4) | (channelIdx))

#define CONTROLLER_FROM_HANDLE(handle)    (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle)       ((handle) & 0x0f)
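
/*
 * Example (illustrative): MAKE_HANDLE(1, 3) yields the handle 0x13, from
 * which CONTROLLER_FROM_HANDLE() recovers 1 and CHANNEL_FROM_HANDLE()
 * recovers 3.
 */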

#define DMA_MAP_DEBUG   0

#if DMA_MAP_DEBUG
#   define  DMA_MAP_PRINT(fmt, args...)   printk("%s: " fmt, __func__,  ## args)
#else
#   define  DMA_MAP_PRINT(fmt, args...)
#endif

/* ---- Private Variables ------------------------------------------------ */

static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;

static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);

#include "dma_device.c"

/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Functions  ------------------------------------------------------- */

/****************************************************************************/
/**
*   Displays information for /proc/dma/mem-type
*/
/****************************************************************************/

static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
                                  int count, int *eof, void *data)
{
        int len = 0;

        len += sprintf(buf + len, "dma_map_mem statistics\n");
        len +=
            sprintf(buf + len, "coherent: %d\n",
                    atomic_read(&gDmaStatMemTypeCoherent));
        len +=
            sprintf(buf + len, "kmalloc:  %d\n",
                    atomic_read(&gDmaStatMemTypeKmalloc));
        len +=
            sprintf(buf + len, "vmalloc:  %d\n",
                    atomic_read(&gDmaStatMemTypeVmalloc));
        len +=
            sprintf(buf + len, "user:     %d\n",
                    atomic_read(&gDmaStatMemTypeUser));

        return len;
}

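/*
 * Illustrative output of "cat /proc/dma/mem-type" (the counter values here
 * are made up):
 *
 *   dma_map_mem statistics
 *   coherent: 12
 *   kmalloc:  3
 *   vmalloc:  0
 *   user:     7
 */
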
/****************************************************************************/
/**
*   Displays information for /proc/dma/channels
*/
/****************************************************************************/

static int dma_proc_read_channels(char *buf, char **start, off_t offset,
                                  int count, int *eof, void *data)
{
        int controllerIdx;
        int channelIdx;
        int limit = count - 200;
        int len = 0;
        DMA_Channel_t *channel;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        if (len >= limit) {
                                break;
                        }

                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        len +=
                            sprintf(buf + len, "%d:%d ", controllerIdx,
                                    channelIdx);

                        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
                            0) {
                                len +=
                                    sprintf(buf + len, "Dedicated for %s ",
                                            DMA_gDeviceAttribute[channel->
                                                                 devType].name);
                        } else {
                                len += sprintf(buf + len, "Shared ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) != 0) {
                                len += sprintf(buf + len, "No ISR ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_LARGE_FIFO) != 0) {
                                len += sprintf(buf + len, "Fifo: 128 ");
                        } else {
                                len += sprintf(buf + len, "Fifo: 64  ");
                        }

                        if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
                                len +=
                                    sprintf(buf + len, "InUse by %s",
                                            DMA_gDeviceAttribute[channel->
                                                                 devType].name);
#if (DMA_DEBUG_TRACK_RESERVATION)
                                len +=
                                    sprintf(buf + len, " (%s:%d)",
                                            channel->fileName,
                                            channel->lineNum);
#endif
                        } else {
                                len += sprintf(buf + len, "Avail ");
                        }

                        if (channel->lastDevType != DMA_DEVICE_NONE) {
                                len +=
                                    sprintf(buf + len, "Last use: %s ",
                                            DMA_gDeviceAttribute[channel->
                                                                 lastDevType].
                                            name);
                        }

                        len += sprintf(buf + len, "\n");
                }
        }
        up(&gDMA.lock);
        *eof = 1;

        return len;
}

/****************************************************************************/
/**
*   Displays information for /proc/dma/devices
*/
/****************************************************************************/

static int dma_proc_read_devices(char *buf, char **start, off_t offset,
                                 int count, int *eof, void *data)
{
        int limit = count - 200;
        int len = 0;
        int devIdx;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
                DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

                if (devAttr->name == NULL) {
                        continue;
                }

                if (len >= limit) {
                        break;
                }

                len += sprintf(buf + len, "%-12s ", devAttr->name);

                if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                        len +=
                            sprintf(buf + len, "Dedicated %d:%d ",
                                    devAttr->dedicatedController,
                                    devAttr->dedicatedChannel);
                } else {
                        len += sprintf(buf + len, "Shared DMA:");
                        if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA0) != 0) {
                                len += sprintf(buf + len, "0");
                        }
                        if ((devAttr->flags & DMA_DEVICE_FLAG_ON_DMA1) != 0) {
                                len += sprintf(buf + len, "1");
                        }
                        len += sprintf(buf + len, " ");
                }
                if ((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0) {
                        len += sprintf(buf + len, "NoISR ");
                }
                if ((devAttr->flags & DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO) != 0) {
                        len += sprintf(buf + len, "Allow-128 ");
                }

                len +=
                    sprintf(buf + len,
                            "Xfer #: %Lu Ticks: %Lu Bytes: %Lu DescLen: %u\n",
                            devAttr->numTransfers, devAttr->transferTicks,
                            devAttr->transferBytes,
                            devAttr->ring.bytesAllocated);

        }

        up(&gDMA.lock);
        *eof = 1;

        return len;
}

/****************************************************************************/
/**
*   Determines if a DMA_Device_t is "valid".
*
*   @return
*       TRUE        - dma device is valid
*       FALSE       - dma device isn't valid
*/
/****************************************************************************/

static inline int IsDeviceValid(DMA_Device_t device)
{
        return (device >= 0) && (device < DMA_NUM_DEVICE_ENTRIES);
}

/****************************************************************************/
/**
*   Translates a DMA handle into a pointer to a channel.
*
*   @return
*       non-NULL    - pointer to DMA_Channel_t
*       NULL        - DMA Handle was invalid
*/
/****************************************************************************/

static inline DMA_Channel_t *HandleToChannel(DMA_Handle_t handle)
{
        int controllerIdx;
        int channelIdx;

        controllerIdx = CONTROLLER_FROM_HANDLE(handle);
        channelIdx = CHANNEL_FROM_HANDLE(handle);

        if ((controllerIdx >= DMA_NUM_CONTROLLERS)
            || (channelIdx >= DMA_NUM_CHANNELS)) {
                return NULL;
        }
        return &gDMA.controller[controllerIdx].channel[channelIdx];
}

/****************************************************************************/
/**
*   Interrupt handler which is called to process DMA interrupts.
*/
/****************************************************************************/

static irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
{
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int irqStatus;

        channel = (DMA_Channel_t *) dev_id;

        /* Figure out why we were called, and knock down the interrupt */

        irqStatus = dmacHw_getInterruptStatus(channel->dmacHwHandle);
        dmacHw_clearInterrupt(channel->dmacHwHandle);

        if ((channel->devType < 0)
            || (channel->devType >= DMA_NUM_DEVICE_ENTRIES)) {
                printk(KERN_ERR "dma_interrupt_handler: Invalid devType: %d\n",
                       channel->devType);
                return IRQ_NONE;
        }
        devAttr = &DMA_gDeviceAttribute[channel->devType];

        /* Update stats */

        if ((irqStatus & dmacHw_INTERRUPT_STATUS_TRANS) != 0) {
                devAttr->transferTicks +=
                    (timer_get_tick_count() - devAttr->transferStartTime);
        }

        if ((irqStatus & dmacHw_INTERRUPT_STATUS_ERROR) != 0) {
                printk(KERN_ERR
                       "dma_interrupt_handler: devType :%d DMA error (%s)\n",
                       channel->devType, devAttr->name);
        } else {
                devAttr->numTransfers++;
                devAttr->transferBytes += devAttr->numBytes;
        }

        /* Call any installed handler */

        if (devAttr->devHandler != NULL) {
                devAttr->devHandler(channel->devType, irqStatus,
                                    devAttr->userData);
        }

        return IRQ_HANDLED;
}

/****************************************************************************/
/**
*   Allocates memory to hold a descriptor ring. The descriptor ring then
*   needs to be populated by making one or more calls to
*   dma_add_descriptors.
*
*   The returned descriptor ring will be automatically initialized.
*
*   @return
*       0           Descriptor ring was allocated successfully
*       -EINVAL     Invalid parameters passed in
*       -ENOMEM     Unable to allocate memory for the desired number of descriptors.
*/
/****************************************************************************/

int dma_alloc_descriptor_ring(DMA_DescriptorRing_t *ring,       /* Descriptor ring to populate */
                              int numDescriptors        /* Number of descriptors that need to be allocated. */
    ) {
        size_t bytesToAlloc = dmacHw_descriptorLen(numDescriptors);

        if ((ring == NULL) || (numDescriptors <= 0)) {
                return -EINVAL;
        }

        ring->physAddr = 0;
        ring->descriptorsAllocated = 0;
        ring->bytesAllocated = 0;

        ring->virtAddr = dma_alloc_writecombine(NULL,
                                                     bytesToAlloc,
                                                     &ring->physAddr,
                                                     GFP_KERNEL);
        if (ring->virtAddr == NULL) {
                return -ENOMEM;
        }

        ring->bytesAllocated = bytesToAlloc;
        ring->descriptorsAllocated = numDescriptors;

        return dma_init_descriptor_ring(ring, numDescriptors);
}

EXPORT_SYMBOL(dma_alloc_descriptor_ring);

/****************************************************************************/
/**
*   Releases the memory which was previously allocated for a descriptor ring.
*/
/****************************************************************************/

void dma_free_descriptor_ring(DMA_DescriptorRing_t *ring        /* Descriptor to release */
    ) {
        if (ring->virtAddr != NULL) {
                dma_free_writecombine(NULL,
                                      ring->bytesAllocated,
                                      ring->virtAddr, ring->physAddr);
        }

        ring->bytesAllocated = 0;
        ring->descriptorsAllocated = 0;
        ring->virtAddr = NULL;
        ring->physAddr = 0;
}

EXPORT_SYMBOL(dma_free_descriptor_ring);

/****************************************************************************/
/**
*   Initializes a descriptor ring, so that descriptors can be added to it.
*   Once a descriptor ring has been allocated, it may be reinitialized for
*   use with additional/different regions of memory.
*
*   Note that if 7 descriptors are allocated, it's perfectly acceptable to
*   initialize the ring with a smaller number of descriptors. The amount
*   of memory allocated for the descriptor ring will not be reduced, and
*   the descriptor ring may be reinitialized later.
*
*   @return
*       0           Descriptor ring was initialized successfully
*       -EINVAL     The descriptor ring has no memory allocated for it.
*       -ENOMEM     The descriptor ring which was passed in has insufficient
*                   space to hold the desired number of descriptors.
*/
/****************************************************************************/

int dma_init_descriptor_ring(DMA_DescriptorRing_t *ring,        /* Descriptor ring to initialize */
                             int numDescriptors /* Number of descriptors to initialize. */
    ) {
        if (ring->virtAddr == NULL) {
                return -EINVAL;
        }
        if (dmacHw_initDescriptor(ring->virtAddr,
                                  ring->physAddr,
                                  ring->bytesAllocated, numDescriptors) < 0) {
                printk(KERN_ERR
                       "dma_init_descriptor_ring: dmacHw_initDescriptor failed\n");
                return -ENOMEM;
        }

        return 0;
}

EXPORT_SYMBOL(dma_init_descriptor_ring);

/****************************************************************************/
/**
*   Determines the number of descriptors which would be required for a
*   transfer of the indicated memory region.
*
*   This function also needs to know which DMA device this transfer will
*   be destined for, so that the appropriate DMA configuration can be retrieved.
*   DMA parameters such as the transfer width, and whether the transfer is
*   memory-to-memory or memory-to-peripheral, can affect the actual number
*   of descriptors required.
*
*   @return
*       > 0     Returns the number of descriptors required for the indicated transfer
*       -ENODEV Device handed in is invalid.
*       -EINVAL Invalid parameters
*       -ENOMEM Memory exhausted
*/
/****************************************************************************/

int dma_calculate_descriptor_count(DMA_Device_t device, /* DMA Device that this will be associated with */
                                   dma_addr_t srcData,  /* Place to get data to write to device */
                                   dma_addr_t dstData,  /* Pointer to device data address */
                                   size_t numBytes      /* Number of bytes to transfer to the device */
    ) {
        int numDescriptors;
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
                                                              (void *)srcData,
                                                              (void *)dstData,
                                                              numBytes);
        if (numDescriptors < 0) {
                printk(KERN_ERR
                       "dma_calculate_descriptor_count: dmacHw_calculateDescriptorCount failed\n");
                return -EINVAL;
        }

        return numDescriptors;
}

EXPORT_SYMBOL(dma_calculate_descriptor_count);

/****************************************************************************/
/**
*   Adds a region of memory to the descriptor ring. Note that it may take
*   multiple descriptors for each region of memory. It is the caller's
*   responsibility to allocate a sufficiently large descriptor ring.
*
*   @return
*       0       Descriptors were added successfully
*       -ENODEV Device handed in is invalid.
*       -EINVAL Invalid parameters
*       -ENOMEM Memory exhausted
*/
/****************************************************************************/

int dma_add_descriptors(DMA_DescriptorRing_t *ring,     /* Descriptor ring to add descriptors to */
                        DMA_Device_t device,    /* DMA Device that descriptors are for */
                        dma_addr_t srcData,     /* Place to get data (memory or device) */
                        dma_addr_t dstData,     /* Place to put data (memory or device) */
                        size_t numBytes /* Number of bytes to transfer to the device */
    ) {
        int rc;
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        rc = dmacHw_setDataDescriptor(&devAttr->config,
                                      ring->virtAddr,
                                      (void *)srcData,
                                      (void *)dstData, numBytes);
        if (rc < 0) {
                printk(KERN_ERR
                       "dma_add_descriptors: dmacHw_setDataDescriptor failed with code: %d\n",
                       rc);
                return -ENOMEM;
        }

        return 0;
}

EXPORT_SYMBOL(dma_add_descriptors);

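/*
 * How the ring helpers fit together -- an illustrative sketch, not code from
 * the original file. DMA_DEVICE_FOO, src, dst and numBytes are placeholders,
 * and error handling is abbreviated:
 *
 *      DMA_DescriptorRing_t ring;
 *      int count;
 *      int rc;
 *
 *      count = dma_calculate_descriptor_count(DMA_DEVICE_FOO, src, dst, numBytes);
 *      if (count < 0)
 *              return count;
 *      rc = dma_alloc_descriptor_ring(&ring, count);
 *      if (rc < 0)
 *              return rc;
 *      rc = dma_add_descriptors(&ring, DMA_DEVICE_FOO, src, dst, numBytes);
 *      if (rc < 0)
 *              dma_free_descriptor_ring(&ring);
 *      return rc;
 */
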
/****************************************************************************/
/**
*   Sets the descriptor ring associated with a device.
*
*   Once set, the descriptor ring will be associated with the device, even
*   across channel request/free calls. Passing in a NULL descriptor ring
*   will release any descriptor ring currently associated with the device.
*
*   Note: If you call dma_transfer, or one of the other dma_alloc_ functions,
*         the descriptor ring may be released and reallocated.
*
*   Note: This function will release the descriptor memory for any current
*         descriptor ring associated with this device.
*
*   @return
*       0       Descriptor ring was updated successfully
*       -ENODEV Device handed in is invalid.
*/
/****************************************************************************/

int dma_set_device_descriptor_ring(DMA_Device_t device, /* Device to update the descriptor ring for. */
                                   DMA_DescriptorRing_t *ring   /* Descriptor ring to associate with the device */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        /* Free the previously allocated descriptor ring */

        dma_free_descriptor_ring(&devAttr->ring);

        if (ring != NULL) {
                /* Copy in the new one */

                devAttr->ring = *ring;
        }

        /* Set things up so that if dma_transfer is called then this descriptor */
        /* ring will get freed. */

        devAttr->prevSrcData = 0;
        devAttr->prevDstData = 0;
        devAttr->prevNumBytes = 0;

        return 0;
}

EXPORT_SYMBOL(dma_set_device_descriptor_ring);

/****************************************************************************/
/**
*   Retrieves the descriptor ring associated with a device.
*
*   @return
*       0       Descriptor ring was retrieved successfully
*       -ENODEV Device handed in is invalid.
*/
/****************************************************************************/

int dma_get_device_descriptor_ring(DMA_Device_t device, /* Device to retrieve the descriptor ring for. */
                                   DMA_DescriptorRing_t *ring   /* Place to store retrieved ring */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        memset(ring, 0, sizeof(*ring));

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        *ring = devAttr->ring;

        return 0;
}

EXPORT_SYMBOL(dma_get_device_descriptor_ring);

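/*
 * Illustrative sketch (not code from the original file) of handing a freshly
 * allocated ring over to a device; DMA_DEVICE_FOO and count are placeholders:
 *
 *      DMA_DescriptorRing_t ring;
 *
 *      if (dma_alloc_descriptor_ring(&ring, count) == 0)
 *              dma_set_device_descriptor_ring(DMA_DEVICE_FOO, &ring);
 *
 * The device now owns the ring's memory; passing NULL later releases it:
 *
 *      dma_set_device_descriptor_ring(DMA_DEVICE_FOO, NULL);
 */
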
/****************************************************************************/
/**
*   Configures a DMA channel.
*
*   @return
*       0       - Configuration was successful.
*
*       -EIO    - dmacHw_configChannel failed.
*       -ENODEV - Handle passed in is invalid.
*/
/****************************************************************************/

static int ConfigChannel(DMA_Handle_t handle)
{
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int controllerIdx;

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[channel->devType];
        controllerIdx = CONTROLLER_FROM_HANDLE(handle);

        if ((devAttr->flags & DMA_DEVICE_FLAG_PORT_PER_DMAC) != 0) {
                if (devAttr->config.transferType ==
                    dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL) {
                        devAttr->config.dstPeripheralPort =
                            devAttr->dmacPort[controllerIdx];
                } else if (devAttr->config.transferType ==
                           dmacHw_TRANSFER_TYPE_PERIPHERAL_TO_MEM) {
                        devAttr->config.srcPeripheralPort =
                            devAttr->dmacPort[controllerIdx];
                }
        }

        if (dmacHw_configChannel(channel->dmacHwHandle, &devAttr->config) != 0) {
                printk(KERN_ERR "ConfigChannel: dmacHw_configChannel failed\n");
                return -EIO;
        }

        return 0;
}

/****************************************************************************/
/**
*   Initializes all of the data structures associated with the DMA.
*   @return
*       >= 0    - Initialization was successful.
*
*       -EBUSY  - A dedicated channel is claimed by more than one device.
*       -EINVAL - A device table entry is invalid.
*/
/****************************************************************************/

int dma_init(void)
{
        int rc = 0;
        int controllerIdx;
        int channelIdx;
        DMA_Device_t devIdx;
        DMA_Channel_t *channel;
        DMA_Handle_t dedicatedHandle;

        memset(&gDMA, 0, sizeof(gDMA));

        init_MUTEX_LOCKED(&gDMA.lock);
        init_waitqueue_head(&gDMA.freeChannelQ);

        /* Initialize the Hardware */

        dmacHw_initDma();

        /* Start off by marking all of the DMA channels as shared. */

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        channel->flags = 0;
                        channel->devType = DMA_DEVICE_NONE;
                        channel->lastDevType = DMA_DEVICE_NONE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                        channel->fileName = "";
                        channel->lineNum = 0;
#endif

                        channel->dmacHwHandle =
                            dmacHw_getChannelHandle(dmacHw_MAKE_CHANNEL_ID
                                                    (controllerIdx,
                                                     channelIdx));
                        dmacHw_initChannel(channel->dmacHwHandle);
                }
        }

        /* Record any special attributes that channels may have */

        gDMA.controller[0].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[0].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[1].channel[0].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;
        gDMA.controller[1].channel[1].flags |= DMA_CHANNEL_FLAG_LARGE_FIFO;

        /* Now walk through and record the dedicated channels. */

        for (devIdx = 0; devIdx < DMA_NUM_DEVICE_ENTRIES; devIdx++) {
                DMA_DeviceAttribute_t *devAttr = &DMA_gDeviceAttribute[devIdx];

                if (((devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) != 0)
                    && ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0)) {
                        printk(KERN_ERR
                               "DMA Device: %s Can only request NO_ISR for dedicated devices\n",
                               devAttr->name);
                        rc = -EINVAL;
                        goto out;
                }

                if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                        /* This is a dedicated device. Mark the channel as being reserved. */

                        if (devAttr->dedicatedController >= DMA_NUM_CONTROLLERS) {
                                printk(KERN_ERR
                                       "DMA Device: %s DMA Controller %d is out of range\n",
                                       devAttr->name,
                                       devAttr->dedicatedController);
                                rc = -EINVAL;
                                goto out;
                        }

                        if (devAttr->dedicatedChannel >= DMA_NUM_CHANNELS) {
                                printk(KERN_ERR
                                       "DMA Device: %s DMA Channel %d is out of range\n",
                                       devAttr->name,
                                       devAttr->dedicatedChannel);
                                rc = -EINVAL;
                                goto out;
                        }

                        dedicatedHandle =
                            MAKE_HANDLE(devAttr->dedicatedController,
                                        devAttr->dedicatedChannel);
                        channel = HandleToChannel(dedicatedHandle);

                        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) !=
                            0) {
                                printk
                                    ("DMA Device: %s attempting to use same DMA Controller:Channel (%d:%d) as %s\n",
                                     devAttr->name,
                                     devAttr->dedicatedController,
                                     devAttr->dedicatedChannel,
                                     DMA_gDeviceAttribute[channel->devType].
                                     name);
                                rc = -EBUSY;
                                goto out;
                        }

                        channel->flags |= DMA_CHANNEL_FLAG_IS_DEDICATED;
                        channel->devType = devIdx;

                        if (devAttr->flags & DMA_DEVICE_FLAG_NO_ISR) {
                                channel->flags |= DMA_CHANNEL_FLAG_NO_ISR;
                        }

                        /* For dedicated channels, we can go ahead and configure the DMA channel now */
                        /* as well. */

                        ConfigChannel(dedicatedHandle);
                }
        }

        /* Go through and register the interrupt handlers */

        for (controllerIdx = 0; controllerIdx < DMA_NUM_CONTROLLERS;
             controllerIdx++) {
                for (channelIdx = 0; channelIdx < DMA_NUM_CHANNELS;
                     channelIdx++) {
                        channel =
                            &gDMA.controller[controllerIdx].channel[channelIdx];

                        if ((channel->flags & DMA_CHANNEL_FLAG_NO_ISR) == 0) {
                                snprintf(channel->name, sizeof(channel->name),
                                         "dma %d:%d %s", controllerIdx,
                                         channelIdx,
                                         channel->devType ==
                                         DMA_DEVICE_NONE ? "" :
                                         DMA_gDeviceAttribute[channel->devType].
                                         name);

                                rc =
                                     request_irq(IRQ_DMA0C0 +
                                                 (controllerIdx *
                                                  DMA_NUM_CHANNELS) +
                                                 channelIdx,
                                                 dma_interrupt_handler,
                                                 IRQF_DISABLED, channel->name,
                                                 channel);
                                if (rc != 0) {
                                        printk(KERN_ERR
                                               "request_irq for IRQ_DMA%dC%d failed\n",
                                               controllerIdx, channelIdx);
                                }
                        }
                }
        }

        /* Create /proc/dma/channels, /proc/dma/devices and /proc/dma/mem-type */

        gDmaDir = create_proc_entry("dma", S_IFDIR | S_IRUGO | S_IXUGO, NULL);

        if (gDmaDir == NULL) {
                printk(KERN_ERR "Unable to create /proc/dma\n");
        } else {
                create_proc_read_entry("channels", 0, gDmaDir,
                                       dma_proc_read_channels, NULL);
                create_proc_read_entry("devices", 0, gDmaDir,
                                       dma_proc_read_devices, NULL);
                create_proc_read_entry("mem-type", 0, gDmaDir,
                                       dma_proc_read_mem_type, NULL);
        }

out:

        up(&gDMA.lock);

        return rc;
}

/****************************************************************************/
/**
*   Reserves a channel for use with @a dev. If the device is set up to use
*   a shared channel, then this function will block until a free channel
*   becomes available.
*
*   @return
*       >= 0    - A valid DMA Handle.
*       -EBUSY  - Device is currently being used.
*       -ENODEV - Device handed in is invalid.
*/
/****************************************************************************/

#if (DMA_DEBUG_TRACK_RESERVATION)
DMA_Handle_t dma_request_channel_dbg
    (DMA_Device_t dev, const char *fileName, int lineNum)
#else
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
#endif
{
        DMA_Handle_t handle;
        DMA_DeviceAttribute_t *devAttr;
        DMA_Channel_t *channel;
        int controllerIdx;
        int controllerIdx2;
        int channelIdx;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        if ((dev < 0) || (dev >= DMA_NUM_DEVICE_ENTRIES)) {
                handle = -ENODEV;
                goto out;
        }
        devAttr = &DMA_gDeviceAttribute[dev];

#if (DMA_DEBUG_TRACK_RESERVATION)
        {
                char *s;

                s = strrchr(fileName, '/');
                if (s != NULL) {
                        fileName = s + 1;
                }
        }
#endif
        if ((devAttr->flags & DMA_DEVICE_FLAG_IN_USE) != 0) {
                /* This device has already been requested and not been freed */

                printk(KERN_ERR "%s: device %s is already requested\n",
                       __func__, devAttr->name);
                handle = -EBUSY;
                goto out;
        }

        if ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) != 0) {
                /* This device has a dedicated channel. */

                channel =
                    &gDMA.controller[devAttr->dedicatedController].
                    channel[devAttr->dedicatedChannel];
                if ((channel->flags & DMA_CHANNEL_FLAG_IN_USE) != 0) {
                        handle = -EBUSY;
                        goto out;
                }

                channel->flags |= DMA_CHANNEL_FLAG_IN_USE;
                devAttr->flags |= DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                channel->fileName = fileName;
                channel->lineNum = lineNum;
#endif
                handle =
                    MAKE_HANDLE(devAttr->dedicatedController,
                                devAttr->dedicatedChannel);
                goto out;
        }

        /* This device needs to use one of the shared channels. */

        handle = DMA_INVALID_HANDLE;
        while (handle == DMA_INVALID_HANDLE) {
                /* Scan through the shared channels and see if one is available */

                for (controllerIdx2 = 0; controllerIdx2 < DMA_NUM_CONTROLLERS;
                     controllerIdx2++) {
                        /* Check to see if we should try on controller 1 first. */

                        controllerIdx = controllerIdx2;
                        if ((devAttr->
                             flags & DMA_DEVICE_FLAG_ALLOC_DMA1_FIRST) != 0) {
                                controllerIdx = 1 - controllerIdx;
                        }

                        /* See if the device is available on the controller being tested */

                        if ((devAttr->
                             flags & (DMA_DEVICE_FLAG_ON_DMA0 << controllerIdx))
                            != 0) {
                                for (channelIdx = 0;
                                     channelIdx < DMA_NUM_CHANNELS;
                                     channelIdx++) {
                                        channel =
                                            &gDMA.controller[controllerIdx].
                                            channel[channelIdx];

                                        if (((channel->
                                              flags &
                                              DMA_CHANNEL_FLAG_IS_DEDICATED) ==
                                             0)
                                            &&
                                            ((channel->
                                              flags & DMA_CHANNEL_FLAG_IN_USE)
                                             == 0)) {
                                                if (((channel->
                                                      flags &
                                                      DMA_CHANNEL_FLAG_LARGE_FIFO)
                                                     != 0)
                                                    &&
                                                    ((devAttr->
                                                      flags &
                                                      DMA_DEVICE_FLAG_ALLOW_LARGE_FIFO)
                                                     == 0)) {
                                                        /* This channel is a large fifo - don't tie it up */
                                                        /* with devices that we don't want using it. */

                                                        continue;
                                                }

                                                channel->flags |=
                                                    DMA_CHANNEL_FLAG_IN_USE;
                                                channel->devType = dev;
                                                devAttr->flags |=
                                                    DMA_DEVICE_FLAG_IN_USE;

#if (DMA_DEBUG_TRACK_RESERVATION)
                                                channel->fileName = fileName;
                                                channel->lineNum = lineNum;
#endif
                                                handle =
                                                    MAKE_HANDLE(controllerIdx,
                                                                channelIdx);

                                                /* Now that we've reserved the channel - we can go ahead and configure it */

                                                if (ConfigChannel(handle) != 0) {
                                                        handle = -EIO;
                                                        printk(KERN_ERR
                                                               "dma_request_channel: ConfigChannel failed\n");
                                                }
                                                goto out;
                                        }
                                }
                        }
                }

                /* No channels are currently available. Let's wait for one to free up. */

                {
                        DEFINE_WAIT(wait);

                        prepare_to_wait(&gDMA.freeChannelQ, &wait,
                                        TASK_INTERRUPTIBLE);
                        up(&gDMA.lock);
                        schedule();
                        finish_wait(&gDMA.freeChannelQ, &wait);

                        if (signal_pending(current)) {
                                /* We don't currently hold gDMA.lock, so we return directly */

                                return -ERESTARTSYS;
                        }
                }

                if (down_interruptible(&gDMA.lock)) {
                        return -ERESTARTSYS;
                }
        }

out:
        up(&gDMA.lock);

        return handle;
}

/* Create both _dbg and non _dbg functions for modules. */

#if (DMA_DEBUG_TRACK_RESERVATION)
#undef dma_request_channel
DMA_Handle_t dma_request_channel(DMA_Device_t dev)
{
        return dma_request_channel_dbg(dev, __FILE__, __LINE__);
}

EXPORT_SYMBOL(dma_request_channel_dbg);
#endif
EXPORT_SYMBOL(dma_request_channel);

/****************************************************************************/
/**
*   Frees a previously allocated DMA Handle.
*/
/****************************************************************************/

int dma_free_channel(DMA_Handle_t handle        /* DMA handle. */
    ) {
        int rc = 0;
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;

        if (down_interruptible(&gDMA.lock) < 0) {
                return -ERESTARTSYS;
        }

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                rc = -EINVAL;
                goto out;
        }

        devAttr = &DMA_gDeviceAttribute[channel->devType];

        if ((channel->flags & DMA_CHANNEL_FLAG_IS_DEDICATED) == 0) {
                channel->lastDevType = channel->devType;
                channel->devType = DMA_DEVICE_NONE;
        }
        channel->flags &= ~DMA_CHANNEL_FLAG_IN_USE;
        devAttr->flags &= ~DMA_DEVICE_FLAG_IN_USE;

out:
        up(&gDMA.lock);

        wake_up_interruptible(&gDMA.freeChannelQ);

        return rc;
}

EXPORT_SYMBOL(dma_free_channel);

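/*
 * Typical shared-channel usage -- an illustrative sketch, not code from the
 * original file. DMA_DEVICE_FOO, src, dst and numBytes are placeholders, and
 * error handling is abbreviated. Note that dma_request_channel() may block
 * waiting for a shared channel, so this sequence must run in process context:
 *
 *      DMA_Handle_t handle;
 *
 *      handle = dma_request_channel(DMA_DEVICE_FOO);
 *      if (handle < 0)
 *              return handle;
 *      dma_alloc_descriptors(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
 *                            src, dst, numBytes);
 *      dma_start_transfer(handle);
 *      ... wait for the devHandler callback, or poll dma_wait_transfer_done() ...
 *      dma_free_channel(handle);
 */
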
/****************************************************************************/
/**
*   Determines if a given device has been configured as using a shared
*   channel.
*
*   @return
*       0           Device uses a dedicated channel
*       > 0         Device uses a shared channel
*       < 0         Error code
*/
/****************************************************************************/

int dma_device_is_channel_shared(DMA_Device_t device    /* Device to check. */
    ) {
        DMA_DeviceAttribute_t *devAttr;

        if (!IsDeviceValid(device)) {
                return -ENODEV;
        }
        devAttr = &DMA_gDeviceAttribute[device];

        return ((devAttr->flags & DMA_DEVICE_FLAG_IS_DEDICATED) == 0);
}

EXPORT_SYMBOL(dma_device_is_channel_shared);

/****************************************************************************/
/**
*   Allocates buffers for the descriptors. This is normally done automatically
*   but needs to be done explicitly when initiating a dma from interrupt
*   context.
*
*   @return
*       0       Descriptors were allocated successfully
*       -EINVAL Invalid device type for this kind of transfer
*               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
*       -ENOMEM Memory exhausted
*/
/****************************************************************************/

int dma_alloc_descriptors(DMA_Handle_t handle,  /* DMA Handle */
                          dmacHw_TRANSFER_TYPE_e transferType,  /* Type of transfer being performed */
                          dma_addr_t srcData,   /* Place to get data to write to device */
                          dma_addr_t dstData,   /* Pointer to device data address */
                          size_t numBytes       /* Number of bytes to transfer to the device */
    ) {
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int numDescriptors;
        size_t ringBytesRequired;
        int rc = 0;

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                return -ENODEV;
        }

        devAttr = &DMA_gDeviceAttribute[channel->devType];

        if (devAttr->config.transferType != transferType) {
                return -EINVAL;
        }

        /* Figure out how many descriptors we need. */

        /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
        /*        srcData, dstData, numBytes); */

        numDescriptors = dmacHw_calculateDescriptorCount(&devAttr->config,
                                                              (void *)srcData,
                                                              (void *)dstData,
                                                              numBytes);
        if (numDescriptors < 0) {
                printk(KERN_ERR "%s: dmacHw_calculateDescriptorCount failed\n",
                       __func__);
                return -EINVAL;
        }

        /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
        /* a new one. */

        ringBytesRequired = dmacHw_descriptorLen(numDescriptors);

        /* printk("ringBytesRequired: %d\n", ringBytesRequired); */

        if (ringBytesRequired > devAttr->ring.bytesAllocated) {
                /* Make sure that this code path is never taken from interrupt context. */
                /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
                /* allocation needs to have already been done. */

                might_sleep();

                /* Free the old descriptor ring and allocate a new one. */

                dma_free_descriptor_ring(&devAttr->ring);

                /* And allocate a new one. */

                rc =
                     dma_alloc_descriptor_ring(&devAttr->ring,
                                               numDescriptors);
                if (rc < 0) {
                        printk(KERN_ERR
                               "%s: dma_alloc_descriptor_ring(%d) failed\n",
                               __func__, numDescriptors);
                        return rc;
                }
                /* Setup the descriptor for this transfer */

                if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
                                          devAttr->ring.physAddr,
                                          devAttr->ring.bytesAllocated,
                                          numDescriptors) < 0) {
                        printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n",
                               __func__);
                        return -EINVAL;
                }
        } else {
                /* We've already got enough ring buffer allocated. All we need to do is reset */
                /* any control information, just in case the previous DMA was stopped. */

                dmacHw_resetDescriptorControl(devAttr->ring.virtAddr);
        }

        /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
        /* as last time, then we don't need to call setDataDescriptor again. */

        if (dmacHw_setDataDescriptor(&devAttr->config,
                                     devAttr->ring.virtAddr,
                                     (void *)srcData,
                                     (void *)dstData, numBytes) < 0) {
                printk(KERN_ERR "%s: dmacHw_setDataDescriptor failed\n",
                       __func__);
                return -EINVAL;
        }

        /* Remember the critical information for this transfer so that we can eliminate */
        /* another call to dma_alloc_descriptors if the caller reuses the same buffers */

        devAttr->prevSrcData = srcData;
        devAttr->prevDstData = dstData;
        devAttr->prevNumBytes = numBytes;

        return 0;
}

EXPORT_SYMBOL(dma_alloc_descriptors);

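/*
 * The might_sleep() above is why the block comment insists that descriptor
 * allocation happen ahead of time when a transfer will be started from
 * interrupt context. One illustrative pattern (DMA_DEVICE_FOO, ring, src,
 * dst and numBytes are placeholders): size and attach the ring once during
 * driver setup,
 *
 *      count = dma_calculate_descriptor_count(DMA_DEVICE_FOO, src, dst, numBytes);
 *      dma_alloc_descriptor_ring(&ring, count);
 *      dma_set_device_descriptor_ring(DMA_DEVICE_FOO, &ring);
 *
 * so that a later dma_alloc_descriptors() call from an ISR finds
 * ring.bytesAllocated already sufficient and takes only the non-sleeping
 * reuse path.
 */
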
/****************************************************************************/
/**
*   Allocates and sets up descriptors for a double buffered circular buffer.
*
*   This is primarily intended to be used for things like the ingress samples
*   from a microphone.
*
*   @return
*       > 0     Number of descriptors actually allocated.
*       -EINVAL Invalid device type for this kind of transfer
*               (i.e. the device is _MEM_TO_DEV and not _DEV_TO_MEM)
*       -ENOMEM Memory exhausted
*/
/****************************************************************************/

int dma_alloc_double_dst_descriptors(DMA_Handle_t handle,       /* DMA Handle */
                                     dma_addr_t srcData,        /* Physical address of source data */
                                     dma_addr_t dstData1,       /* Physical address of first destination buffer */
                                     dma_addr_t dstData2,       /* Physical address of second destination buffer */
                                     size_t numBytes    /* Number of bytes in each destination buffer */
    ) {
        DMA_Channel_t *channel;
        DMA_DeviceAttribute_t *devAttr;
        int numDst1Descriptors;
        int numDst2Descriptors;
        int numDescriptors;
        size_t ringBytesRequired;
        int rc = 0;

        channel = HandleToChannel(handle);
        if (channel == NULL) {
                return -ENODEV;
        }

        devAttr = &DMA_gDeviceAttribute[channel->devType];

        /* Figure out how many descriptors we need. */

        /* printk("srcData: 0x%08x dstData: 0x%08x, numBytes: %d\n", */
        /*        srcData, dstData, numBytes); */

        numDst1Descriptors =
             dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
                                             (void *)dstData1, numBytes);
        if (numDst1Descriptors < 0) {
                return -EINVAL;
        }
        numDst2Descriptors =
             dmacHw_calculateDescriptorCount(&devAttr->config, (void *)srcData,
                                             (void *)dstData2, numBytes);
        if (numDst2Descriptors < 0) {
                return -EINVAL;
        }
        numDescriptors = numDst1Descriptors + numDst2Descriptors;
        /* printk("numDescriptors: %d\n", numDescriptors); */

        /* Check to see if we can reuse the existing descriptor ring, or if we need to allocate */
        /* a new one. */

        ringBytesRequired = dmacHw_descriptorLen(numDescriptors);

        /* printk("ringBytesRequired: %d\n", ringBytesRequired); */

        if (ringBytesRequired > devAttr->ring.bytesAllocated) {
                /* Make sure that this code path is never taken from interrupt context. */
                /* It's OK for an interrupt to initiate a DMA transfer, but the descriptor */
                /* allocation needs to have already been done. */

                might_sleep();

                /* Free the old descriptor ring and allocate a new one. */

                dma_free_descriptor_ring(&devAttr->ring);

                /* And allocate a new one. */

                rc =
                     dma_alloc_descriptor_ring(&devAttr->ring,
                                               numDescriptors);
                if (rc < 0) {
                        printk(KERN_ERR
                               "%s: dma_alloc_descriptor_ring(%d) failed\n",
                               __func__, numDescriptors);
                        return rc;
                }
        }

        /* Setup the descriptor for this transfer. Since this function is used with */
        /* CONTINUOUS DMA operations, we need to reinitialize every time, otherwise */
        /* setDataDescriptor will keep trying to append onto the end. */

        if (dmacHw_initDescriptor(devAttr->ring.virtAddr,
                                  devAttr->ring.physAddr,
                                  devAttr->ring.bytesAllocated,
                                  numDescriptors) < 0) {
                printk(KERN_ERR "%s: dmacHw_initDescriptor failed\n", __func__);
                return -EINVAL;
        }

        /* dma_alloc/free both set the prevSrc/DstData to 0. If they happen to be the same */
        /* as last time, then we don't need to call setDataDescriptor again. */

        if (dmacHw_setDataDescriptor(&devAttr->config,
                                     devAttr->ring.virtAddr,
                                     (void *)srcData,
                                     (void *)dstData1, numBytes) < 0) {
                printk(KERN_ERR "%s: dmacHw_setDataDescriptor 1 failed\n",
                       __func__);
                return -EINVAL;
        }
        if (dmacHw_setDataDescriptor(&devAttr->config,
                                     devAttr->ring.virtAddr,
                                     (void *)srcData,
                                     (void *)dstData2, numBytes) < 0) {
                printk(KERN_ERR "%s: dmacHw_setDataDescriptor 2 failed\n",
                       __func__);
                return -EINVAL;
        }

        /* Callers should follow this with dma_start_transfer rather than the */
        /* dma_transfer_xxx helpers, so just invalidate the 'prev' variables. */
1367
1368        devAttr->prevSrcData = 0;
1369        devAttr->prevDstData = 0;
1370        devAttr->prevNumBytes = 0;
1371
1372        return numDescriptors;
1373}
1374
1375EXPORT_SYMBOL(dma_alloc_double_dst_descriptors);
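
/*
 * Illustrative usage sketch (not part of the driver): set up a continuous,
 * double-buffered transfer and then kick it off with dma_start_transfer().
 * DMA_DEVICE_EXAMPLE_DEV is a hypothetical device type, and
 * dma_request_channel() is assumed to be the channel-reservation helper
 * defined earlier in this file. Guarded out since it is an example only.
 */
#if 0
static int example_double_buffer_start(dma_addr_t src,
                                       dma_addr_t dst1, dma_addr_t dst2,
                                       size_t numBytes)
{
        DMA_Handle_t handle;
        int rc;

        handle = dma_request_channel(DMA_DEVICE_EXAMPLE_DEV);
        if (handle < 0)
                return handle;

        /* Build descriptor pairs covering both destination buffers. */
        rc = dma_alloc_double_dst_descriptors(handle, src, dst1, dst2,
                                              numBytes);
        if (rc < 0)
                return rc;

        /* The descriptors are ready; start the hardware explicitly. */
        return dma_start_transfer(handle);
}
#endif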
1376
1377/****************************************************************************/
1378/**
*   Initiates a transfer when the descriptors have already been set up.
1380*
1381*   This is a special case, and normally, the dma_transfer_xxx functions should
1382*   be used.
1383*
1384*   @return
1385*       0       Transfer was started successfully
1386*       -ENODEV Invalid handle
1387*/
1388/****************************************************************************/
1389
1390int dma_start_transfer(DMA_Handle_t handle)
1391{
1392        DMA_Channel_t *channel;
1393        DMA_DeviceAttribute_t *devAttr;
1394
1395        channel = HandleToChannel(handle);
1396        if (channel == NULL) {
1397                return -ENODEV;
1398        }
1399        devAttr = &DMA_gDeviceAttribute[channel->devType];
1400
1401        dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1402                                devAttr->ring.virtAddr);
1403
1404        /* Since we got this far, everything went successfully */
1405
1406        return 0;
1407}
1408
1409EXPORT_SYMBOL(dma_start_transfer);
1410
1411/****************************************************************************/
1412/**
1413*   Stops a previously started DMA transfer.
1414*
1415*   @return
1416*       0       Transfer was stopped successfully
1417*       -ENODEV Invalid handle
1418*/
1419/****************************************************************************/
1420
1421int dma_stop_transfer(DMA_Handle_t handle)
1422{
1423        DMA_Channel_t *channel;
1424
1425        channel = HandleToChannel(handle);
1426        if (channel == NULL) {
1427                return -ENODEV;
1428        }
1429
1430        dmacHw_stopTransfer(channel->dmacHwHandle);
1431
1432        return 0;
1433}
1434
1435EXPORT_SYMBOL(dma_stop_transfer);
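
/*
 * Companion teardown for the double-buffer sketch above: halt the continuous
 * transfer and release the channel. dma_free_channel() is assumed to be the
 * release helper defined earlier in this file; example only, so guarded out.
 */
#if 0
static void example_double_buffer_stop(DMA_Handle_t handle)
{
        dma_stop_transfer(handle);
        dma_free_channel(handle);
}
#endif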
1436
1437/****************************************************************************/
1438/**
*   Waits for a DMA to complete by polling. This function is only intended
*   to be used for testing. Interrupts should be used for most DMA operations.
*
*   @return
*       0       Transfer completed successfully
*       -ENODEV Invalid handle
*       -EIO    The transfer failed
1441*/
1442/****************************************************************************/
1443
1444int dma_wait_transfer_done(DMA_Handle_t handle)
1445{
1446        DMA_Channel_t *channel;
1447        dmacHw_TRANSFER_STATUS_e status;
1448
1449        channel = HandleToChannel(handle);
1450        if (channel == NULL) {
1451                return -ENODEV;
1452        }
1453
1454        while ((status =
1455                dmacHw_transferCompleted(channel->dmacHwHandle)) ==
1456               dmacHw_TRANSFER_STATUS_BUSY) {
                cpu_relax();    /* busy-wait; this helper is test-only */
1458        }
1459
1460        if (status == dmacHw_TRANSFER_STATUS_ERROR) {
1461                printk(KERN_ERR "%s: DMA transfer failed\n", __func__);
1462                return -EIO;
1463        }
1464        return 0;
1465}
1466
1467EXPORT_SYMBOL(dma_wait_transfer_done);
1468
1469/****************************************************************************/
1470/**
1471*   Initiates a DMA, allocating the descriptors as required.
1472*
1473*   @return
1474*       0       Transfer was started successfully
1475*       -EINVAL Invalid device type for this kind of transfer
1476*               (i.e. the device is _DEV_TO_MEM and not _MEM_TO_DEV)
1477*/
1478/****************************************************************************/
1479
int dma_transfer(DMA_Handle_t handle,   /* DMA Handle */
                 dmacHw_TRANSFER_TYPE_e transferType,   /* Type of transfer being performed */
                 dma_addr_t srcData,    /* Physical address of the source data */
                 dma_addr_t dstData,    /* Physical address of the destination */
                 size_t numBytes        /* Number of bytes to transfer */
1485    ) {
1486        DMA_Channel_t *channel;
1487        DMA_DeviceAttribute_t *devAttr;
1488        int rc = 0;
1489
1490        channel = HandleToChannel(handle);
1491        if (channel == NULL) {
1492                return -ENODEV;
1493        }
1494
1495        devAttr = &DMA_gDeviceAttribute[channel->devType];
1496
1497        if (devAttr->config.transferType != transferType) {
1498                return -EINVAL;
1499        }
1500
        /* We keep track of the information about the previous request for this */
        /* device; if the attributes match, dma_alloc_descriptors reuses the */
        /* descriptors set up last time rather than reinitializing everything. */

        rc = dma_alloc_descriptors(handle, transferType, srcData,
                                   dstData, numBytes);
        if (rc != 0) {
                return rc;
        }
1513
1514        /* And kick off the transfer */
1515
1516        devAttr->numBytes = numBytes;
1517        devAttr->transferStartTime = timer_get_tick_count();
1518
1519        dmacHw_initiateTransfer(channel->dmacHwHandle, &devAttr->config,
1520                                devAttr->ring.virtAddr);
1521
1522        /* Since we got this far, everything went successfully */
1523
1524        return 0;
1525}
1526
1527EXPORT_SYMBOL(dma_transfer);
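
/*
 * Test-style sketch combining dma_transfer() with the polling helper above.
 * Production code should use dma_set_device_handler() below instead of
 * spinning. The transfer type constant follows the dmacHw_TRANSFER_TYPE_e
 * naming used in this file, and the addresses are assumed to be DMA-able
 * physical addresses. Example only, so it is guarded out.
 */
#if 0
static int example_polled_write(DMA_Handle_t handle,
                                dma_addr_t src, dma_addr_t devFifo,
                                size_t numBytes)
{
        int rc;

        rc = dma_transfer(handle, dmacHw_TRANSFER_TYPE_MEM_TO_PERIPHERAL,
                          src, devFifo, numBytes);
        if (rc != 0)
                return rc;

        return dma_wait_transfer_done(handle);  /* busy-waits; test only */
}
#endif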
1528
1529/****************************************************************************/
1530/**
1531*   Set the callback function which will be called when a transfer completes.
1532*   If a NULL callback function is set, then no callback will occur.
1533*
1534*   @note   @a devHandler will be called from IRQ context.
1535*
1536*   @return
1537*       0       - Success
*       -ENODEV - The device passed in is invalid.
1539*/
1540/****************************************************************************/
1541
1542int dma_set_device_handler(DMA_Device_t dev,    /* Device to set the callback for. */
1543                           DMA_DeviceHandler_t devHandler,      /* Function to call when the DMA completes */
1544                           void *userData       /* Pointer which will be passed to devHandler. */
1545    ) {
1546        DMA_DeviceAttribute_t *devAttr;
1547        unsigned long flags;
1548
1549        if (!IsDeviceValid(dev)) {
1550                return -ENODEV;
1551        }
1552        devAttr = &DMA_gDeviceAttribute[dev];
1553
1554        local_irq_save(flags);
1555
1556        devAttr->userData = userData;
1557        devAttr->devHandler = devHandler;
1558
1559        local_irq_restore(flags);
1560
1561        return 0;
1562}
1563
1564EXPORT_SYMBOL(dma_set_device_handler);
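
/*
 * Sketch of typical handler usage: signal a completion from the callback.
 * The handler runs in IRQ context, so it must not sleep. The
 * (dev, reason, userData) signature is assumed from DMA_DeviceHandler_t in
 * <mach/dma.h>, and <linux/completion.h> is assumed to be available.
 * Guarded out since it is an example only.
 */
#if 0
static void example_dma_done(DMA_Device_t dev, int reason, void *userData)
{
        struct completion *done = userData;

        complete(done);         /* safe to call from IRQ context */
}

static int example_register_handler(DMA_Device_t dev, struct completion *done)
{
        init_completion(done);
        return dma_set_device_handler(dev, example_dma_done, done);
}
#endif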
1565
1566/****************************************************************************/
1567/**
1568*   Initializes a memory mapping structure
1569*/
1570/****************************************************************************/
1571
1572int dma_init_mem_map(DMA_MemMap_t *memMap)
1573{
1574        memset(memMap, 0, sizeof(*memMap));
1575
1576        init_MUTEX(&memMap->lock);
1577
1578        return 0;
1579}
1580
1581EXPORT_SYMBOL(dma_init_mem_map);
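
/*
 * Typical lifetime sketch: a driver embeds one DMA_MemMap_t per direction in
 * its private state and initializes it once at probe time. The structure and
 * function names are hypothetical; example only, so it is guarded out.
 */
#if 0
struct example_drv {
        DMA_MemMap_t txMap;
};

static int example_probe(struct example_drv *drv)
{
        return dma_init_mem_map(&drv->txMap);
}
#endif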
1582
1583/****************************************************************************/
1584/**
1585*   Releases any memory currently being held by a memory mapping structure.
1586*/
1587/****************************************************************************/
1588
1589int dma_term_mem_map(DMA_MemMap_t *memMap)
{
        int regionIdx;

        down(&memMap->lock);    /* Just being paranoid */

        /* Free up any allocated memory: dma_map_add_region/_segment kmalloc the */
        /* region array and the per-region segment arrays, so release them here. */

        for (regionIdx = 0; regionIdx < memMap->numRegionsAllocated; regionIdx++) {
                kfree(memMap->region[regionIdx].segment);
        }
        kfree(memMap->region);

        up(&memMap->lock);
        memset(memMap, 0, sizeof(*memMap));
1597
1598        return 0;
1599}
1600
1601EXPORT_SYMBOL(dma_term_mem_map);
1602
1603/****************************************************************************/
1604/**
1605*   Looks at a memory address and categorizes it.
1606*
1607*   @return One of the values from the DMA_MemType_t enumeration.
1608*/
1609/****************************************************************************/
1610
1611DMA_MemType_t dma_mem_type(void *addr)
1612{
1613        unsigned long addrVal = (unsigned long)addr;
1614
1615        if (addrVal >= VMALLOC_END) {
1616                /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
1617
1618                /* dma_alloc_xxx pages are physically and virtually contiguous */
1619
1620                return DMA_MEM_TYPE_DMA;
1621        }
1622
1623        /* Technically, we could add one more classification. Addresses between VMALLOC_END */
1624        /* and the beginning of the DMA virtual address could be considered to be I/O space. */
1625        /* Right now, nobody cares about this particular classification, so we ignore it. */
1626
1627        if (is_vmalloc_addr(addr)) {
1628                /* Address comes from the vmalloc'd region. Pages are virtually */
1629                /* contiguous but NOT physically contiguous */
1630
1631                return DMA_MEM_TYPE_VMALLOC;
1632        }
1633
1634        if (addrVal >= PAGE_OFFSET) {
1635                /* PAGE_OFFSET is typically 0xC0000000 */
1636
1637                /* kmalloc'd pages are physically contiguous */
1638
1639                return DMA_MEM_TYPE_KMALLOC;
1640        }
1641
1642        return DMA_MEM_TYPE_USER;
1643}
1644
1645EXPORT_SYMBOL(dma_mem_type);
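
/*
 * Quick self-check sketch of the classification above: kmalloc memory sits in
 * the lowmem linear map, vmalloc memory in the vmalloc region. Assumes
 * <linux/slab.h> and <linux/vmalloc.h>; example only, so it is guarded out.
 */
#if 0
static void example_mem_type_check(void)
{
        void *k = kmalloc(64, GFP_KERNEL);
        void *v = vmalloc(PAGE_SIZE);

        if (k)
                BUG_ON(dma_mem_type(k) != DMA_MEM_TYPE_KMALLOC);
        if (v)
                BUG_ON(dma_mem_type(v) != DMA_MEM_TYPE_VMALLOC);

        kfree(k);
        vfree(v);
}
#endif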
1646
1647/****************************************************************************/
1648/**
1649*   Looks at a memory address and determines if we support DMA'ing to/from
1650*   that type of memory.
1651*
1652*   @return boolean -
1653*               return value != 0 means dma supported
1654*               return value == 0 means dma not supported
1655*/
1656/****************************************************************************/
1657
1658int dma_mem_supports_dma(void *addr)
1659{
1660        DMA_MemType_t memType = dma_mem_type(addr);
1661
1662        return (memType == DMA_MEM_TYPE_DMA)
1663#if ALLOW_MAP_OF_KMALLOC_MEMORY
1664            || (memType == DMA_MEM_TYPE_KMALLOC)
1665#endif
1666            || (memType == DMA_MEM_TYPE_USER);
1667}
1668
1669EXPORT_SYMBOL(dma_mem_supports_dma);
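
/*
 * Sketch: reject unsupported buffers early rather than letting
 * dma_map_add_region() fail partway through a mapping. Example only.
 */
#if 0
static int example_validate_buffer(void *buf)
{
        if (!dma_mem_supports_dma(buf))
                return -EINVAL; /* e.g. vmalloc'd memory is rejected */
        return 0;
}
#endif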
1670
1671/****************************************************************************/
1672/**
*   Prepares a memory map so that regions can subsequently be added to it
*   for performing a DMA.
*
*   @return     0 on success, -EBUSY if the memory map is already in use.
1676*/
1677/****************************************************************************/
1678
1679int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
1680                  enum dma_data_direction dir   /* Direction that the mapping will be going */
1681    ) {
1682        int rc;
1683
1684        down(&memMap->lock);
1685
1686        DMA_MAP_PRINT("memMap: %p\n", memMap);
1687
1688        if (memMap->inUse) {
1689                printk(KERN_ERR "%s: memory map %p is already being used\n",
1690                       __func__, memMap);
1691                rc = -EBUSY;
1692                goto out;
1693        }
1694
1695        memMap->inUse = 1;
1696        memMap->dir = dir;
1697        memMap->numRegionsUsed = 0;
1698
1699        rc = 0;
1700
1701out:
1702
1703        DMA_MAP_PRINT("returning %d", rc);
1704
1705        up(&memMap->lock);
1706
1707        return rc;
1708}
1709
1710EXPORT_SYMBOL(dma_map_start);
1711
1712/****************************************************************************/
1713/**
1714*   Adds a segment of memory to a memory map. Each segment is both
1715*   physically and virtually contiguous.
1716*
1717*   @return     0 on success, error code otherwise.
1718*/
1719/****************************************************************************/
1720
1721static int dma_map_add_segment(DMA_MemMap_t *memMap,    /* Stores state information about the map */
1722                               DMA_Region_t *region,    /* Region that the segment belongs to */
1723                               void *virtAddr,  /* Virtual address of the segment being added */
1724                               dma_addr_t physAddr,     /* Physical address of the segment being added */
1725                               size_t numBytes  /* Number of bytes of the segment being added */
1726    ) {
1727        DMA_Segment_t *segment;
1728
1729        DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
1730                      physAddr, numBytes);
1731
1732        /* Sanity check */
1733
        if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
            || (((unsigned long)virtAddr + numBytes) >
                ((unsigned long)region->virtAddr + region->numBytes))) {
                printk(KERN_ERR
                       "%s: virtAddr %p is outside region @ %p len: %zu\n",
                       __func__, virtAddr, region->virtAddr, region->numBytes);
1740                return -EINVAL;
1741        }
1742
1743        if (region->numSegmentsUsed > 0) {
1744                /* Check to see if this segment is physically contiguous with the previous one */
1745
1746                segment = &region->segment[region->numSegmentsUsed - 1];
1747
1748                if ((segment->physAddr + segment->numBytes) == physAddr) {
1749                        /* It is - just add on to the end */
1750
1751                        DMA_MAP_PRINT("appending %d bytes to last segment\n",
1752                                      numBytes);
1753
1754                        segment->numBytes += numBytes;
1755
1756                        return 0;
1757                }
1758        }
1759
1760        /* Reallocate to hold more segments, if required. */
1761
1762        if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
1763                DMA_Segment_t *newSegment;
1764                size_t oldSize =
1765                    region->numSegmentsAllocated * sizeof(*newSegment);
1766                int newAlloc = region->numSegmentsAllocated + 4;
1767                size_t newSize = newAlloc * sizeof(*newSegment);
1768
1769                newSegment = kmalloc(newSize, GFP_KERNEL);
1770                if (newSegment == NULL) {
1771                        return -ENOMEM;
1772                }
1773                memcpy(newSegment, region->segment, oldSize);
1774                memset(&((uint8_t *) newSegment)[oldSize], 0,
1775                       newSize - oldSize);
1776                kfree(region->segment);
1777
1778                region->numSegmentsAllocated = newAlloc;
1779                region->segment = newSegment;
1780        }
1781
1782        segment = &region->segment[region->numSegmentsUsed];
1783        region->numSegmentsUsed++;
1784
1785        segment->virtAddr = virtAddr;
1786        segment->physAddr = physAddr;
1787        segment->numBytes = numBytes;
1788
1789        DMA_MAP_PRINT("returning success\n");
1790
1791        return 0;
1792}
1793
1794/****************************************************************************/
1795/**
1796*   Adds a region of memory to a memory map. Each region is virtually
1797*   contiguous, but not necessarily physically contiguous.
1798*
1799*   @return     0 on success, error code otherwise.
1800*/
1801/****************************************************************************/
1802
1803int dma_map_add_region(DMA_MemMap_t *memMap,    /* Stores state information about the map */
1804                       void *mem,       /* Virtual address that we want to get a map of */
1805                       size_t numBytes  /* Number of bytes being mapped */
1806    ) {
1807        unsigned long addr = (unsigned long)mem;
1808        unsigned int offset;
1809        int rc = 0;
1810        DMA_Region_t *region;
1811        dma_addr_t physAddr;
1812
1813        down(&memMap->lock);
1814
1815        DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
1816
1817        if (!memMap->inUse) {
1818                printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
1819                       __func__);
1820                rc = -EINVAL;
1821                goto out;
1822        }
1823
1824        /* Reallocate to hold more regions. */
1825
1826        if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
1827                DMA_Region_t *newRegion;
1828                size_t oldSize =
1829                    memMap->numRegionsAllocated * sizeof(*newRegion);
1830                int newAlloc = memMap->numRegionsAllocated + 4;
1831                size_t newSize = newAlloc * sizeof(*newRegion);
1832
1833                newRegion = kmalloc(newSize, GFP_KERNEL);
1834                if (newRegion == NULL) {
1835                        rc = -ENOMEM;
1836                        goto out;
1837                }
1838                memcpy(newRegion, memMap->region, oldSize);
1839                memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
1840
1841                kfree(memMap->region);
1842
1843                memMap->numRegionsAllocated = newAlloc;
1844                memMap->region = newRegion;
1845        }
1846
1847        region = &memMap->region[memMap->numRegionsUsed];
1848        memMap->numRegionsUsed++;
1849
1850        offset = addr & ~PAGE_MASK;
1851
1852        region->memType = dma_mem_type(mem);
1853        region->virtAddr = mem;
1854        region->numBytes = numBytes;
1855        region->numSegmentsUsed = 0;
1856        region->numLockedPages = 0;
1857        region->lockedPages = NULL;
1858
1859        switch (region->memType) {
1860        case DMA_MEM_TYPE_VMALLOC:
1861                {
1862                        atomic_inc(&gDmaStatMemTypeVmalloc);
1863
1864                        /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
1865
1866                        /* vmalloc'd pages are not physically contiguous */
1867
1868                        rc = -EINVAL;
1869                        break;
1870                }
1871
1872        case DMA_MEM_TYPE_KMALLOC:
1873                {
1874                        atomic_inc(&gDmaStatMemTypeKmalloc);
1875
1876                        /* kmalloc'd pages are physically contiguous, so they'll have exactly */
1877                        /* one segment */
1878
1879#if ALLOW_MAP_OF_KMALLOC_MEMORY
1880                        physAddr =
1881                            dma_map_single(NULL, mem, numBytes, memMap->dir);
1882                        rc = dma_map_add_segment(memMap, region, mem, physAddr,
1883                                                 numBytes);
1884#else
1885                        rc = -EINVAL;
1886#endif
1887                        break;
1888                }
1889
1890        case DMA_MEM_TYPE_DMA:
1891                {
1892                        /* dma_alloc_xxx pages are physically contiguous */
1893
1894                        atomic_inc(&gDmaStatMemTypeCoherent);
1895
1896                        physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
1897
1898                        dma_sync_single_for_cpu(NULL, physAddr, numBytes,
1899                                                memMap->dir);
1900                        rc = dma_map_add_segment(memMap, region, mem, physAddr,
1901                                                 numBytes);
1902                        break;
1903                }
1904
1905        case DMA_MEM_TYPE_USER:
1906                {
1907                        size_t firstPageOffset;
1908                        size_t firstPageSize;
1909                        struct page **pages;
1910                        struct task_struct *userTask;
1911
1912                        atomic_inc(&gDmaStatMemTypeUser);
1913
1914#if 1
1915                        /* If the pages are user pages, then the dma_mem_map_set_user_task function */
1916                        /* must have been previously called. */
1917
                        if (memMap->userTask == NULL) {
                                printk(KERN_ERR
                                       "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
                                       __func__);
                                /* Don't return here: memMap->lock is still held. */
                                rc = -EINVAL;
                                break;
                        }
1924
1925                        /* User pages need to be locked. */
1926
1927                        firstPageOffset =
1928                            (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
1929                        firstPageSize = PAGE_SIZE - firstPageOffset;
1930
1931                        region->numLockedPages = (firstPageOffset
1932                                                  + region->numBytes +
1933                                                  PAGE_SIZE - 1) / PAGE_SIZE;
1934                        pages =
1935                            kmalloc(region->numLockedPages *
1936                                    sizeof(struct page *), GFP_KERNEL);
1937
                        if (pages == NULL) {
                                region->numLockedPages = 0;
                                rc = -ENOMEM;   /* break, not return: lock is held */
                                break;
                        }
1942
1943                        userTask = memMap->userTask;
1944
1945                        down_read(&userTask->mm->mmap_sem);
1946                        rc = get_user_pages(userTask,   /* task */
1947                                            userTask->mm,       /* mm */
1948                                            (unsigned long)region->virtAddr,    /* start */
1949                                            region->numLockedPages,     /* len */
1950                                            memMap->dir == DMA_FROM_DEVICE,     /* write */
1951                                            0,  /* force */
1952                                            pages,      /* pages (array of pointers to page) */
1953                                            NULL);      /* vmas */
1954                        up_read(&userTask->mm->mmap_sem);
1955
                        if (rc != region->numLockedPages) {
                                int pageIdx;

                                /* Release any pages that did get locked. */

                                for (pageIdx = 0; pageIdx < rc; pageIdx++) {
                                        page_cache_release(pages[pageIdx]);
                                }
                                kfree(pages);
                                region->numLockedPages = 0;

                                if (rc >= 0) {
                                        rc = -EINVAL;
                                }
                        } else {
1964                                uint8_t *virtAddr = region->virtAddr;
1965                                size_t bytesRemaining;
1966                                int pageIdx;
1967
1968                                rc = 0; /* Since get_user_pages returns +ve number */
1969
1970                                region->lockedPages = pages;
1971
1972                                /* We've locked the user pages. Now we need to walk them and figure */
1973                                /* out the physical addresses. */
1974
1975                                /* The first page may be partial */
1976
                                dma_map_add_segment(memMap, region, virtAddr,
                                                    PFN_PHYS(page_to_pfn(pages[0]))
                                                    + firstPageOffset,
                                                    firstPageSize);
1984
1985                                virtAddr += firstPageSize;
1986                                bytesRemaining =
1987                                    region->numBytes - firstPageSize;
1988
1989                                for (pageIdx = 1;
1990                                     pageIdx < region->numLockedPages;
1991                                     pageIdx++) {
1992                                        size_t bytesThisPage =
1993                                            (bytesRemaining >
1994                                             PAGE_SIZE ? PAGE_SIZE :
1995                                             bytesRemaining);
1996
                                        DMA_MAP_PRINT
                                            ("pageIdx:%d pages[pageIdx]=%p pfn=%lu phys=%lu\n",
                                             pageIdx, pages[pageIdx],
                                             page_to_pfn(pages[pageIdx]),
                                             PFN_PHYS(page_to_pfn(pages[pageIdx])));
2003
                                        dma_map_add_segment(memMap, region,
                                                            virtAddr,
                                                            PFN_PHYS(page_to_pfn(pages[pageIdx])),
                                                            bytesThisPage);
2011
2012                                        virtAddr += bytesThisPage;
2013                                        bytesRemaining -= bytesThisPage;
2014                                }
2015                        }
2016#else
2017                        printk(KERN_ERR
2018                               "%s: User mode pages are not yet supported\n",
2019                               __func__);
2020
2021                        /* user pages are not physically contiguous */
2022
2023                        rc = -EINVAL;
2024#endif
2025                        break;
2026                }
2027
2028        default:
2029                {
2030                        printk(KERN_ERR "%s: Unsupported memory type: %d\n",
2031                               __func__, region->memType);
2032
2033                        rc = -EINVAL;
2034                        break;
2035                }
2036        }
2037
2038        if (rc != 0) {
2039                memMap->numRegionsUsed--;
2040        }
2041
2042out:
2043
2044        DMA_MAP_PRINT("returning %d\n", rc);
2045
2046        up(&memMap->lock);
2047
2048        return rc;
2049}
2050
EXPORT_SYMBOL(dma_map_add_region);
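
/*
 * Sketch of mapping user-space memory, e.g. from an ioctl: the owning task
 * must be recorded first so the pages can be locked with get_user_pages().
 * dma_mem_map_set_user_task() is the helper referenced in the USER case
 * above; its (memMap, task) signature is assumed here. Example only, so it
 * is guarded out.
 */
#if 0
static int example_map_user_buffer(DMA_MemMap_t *memMap,
                                   void __user *userBuf, size_t numBytes)
{
        int rc;

        dma_mem_map_set_user_task(memMap, current);

        rc = dma_map_start(memMap, DMA_FROM_DEVICE);
        if (rc != 0)
                return rc;

        rc = dma_map_add_region(memMap, (void __force *)userBuf, numBytes);
        if (rc != 0)
                dma_unmap(memMap, 0);   /* also clears the inUse flag */

        return rc;
}
#endif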
2052
2053/****************************************************************************/
2054/**
2055*   Maps in a memory region such that it can be used for performing a DMA.
2056*
2057*   @return     0 on success, error code otherwise.
2058*/
2059/****************************************************************************/
2060
2061int dma_map_mem(DMA_MemMap_t *memMap,   /* Stores state information about the map */
2062                void *mem,      /* Virtual address that we want to get a map of */
2063                size_t numBytes,        /* Number of bytes being mapped */
2064                enum dma_data_direction dir     /* Direction that the mapping will be going */
2065    ) {
2066        int rc;
2067
2068        rc = dma_map_start(memMap, dir);
2069        if (rc == 0) {
2070                rc = dma_map_add_region(memMap, mem, numBytes);
2071                if (rc < 0) {
2072                        /* Since the add fails, this function will fail, and the caller won't */
2073                        /* call unmap, so we need to do it here. */
2074
2075                        dma_unmap(memMap, 0);
2076                }
2077        }
2078
2079        return rc;
2080}
2081
2082EXPORT_SYMBOL(dma_map_mem);
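
/*
 * End-to-end sketch tying the mapping API together: map a kernel buffer,
 * build the descriptor ring against a device FIFO, run the transfer, and
 * unmap. The handle/device plumbing and devPhysAddr are hypothetical, and
 * error unwinding is abbreviated. Example only, so it is guarded out.
 */
#if 0
static int example_mapped_write(DMA_Handle_t handle, DMA_Device_t dev,
                                DMA_MemMap_t *memMap,
                                void *buf, size_t numBytes,
                                dma_addr_t devPhysAddr)
{
        int rc;

        rc = dma_map_mem(memMap, buf, numBytes, DMA_TO_DEVICE);
        if (rc != 0)
                return rc;

        rc = dma_map_create_descriptor_ring(dev, memMap, devPhysAddr);
        if (rc == 0) {
                rc = dma_start_transfer(handle);
                if (rc == 0)
                        /* test-style wait; real code uses a device handler */
                        rc = dma_wait_transfer_done(handle);
        }

        dma_unmap(memMap, 0);   /* 0: the device did not write to the buffer */
        return rc;
}
#endif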
2083
2084/****************************************************************************/
2085/**
*   Sets up a descriptor ring for a given memory map.
2087*
2088*   It is assumed that the descriptor ring has already been initialized, and
2089*   this routine will only reallocate a new descriptor ring if the existing
2090*   one is too small.
2091*
2092*   @return     0 on success, error code otherwise.
2093*/
2094/****************************************************************************/
2095
2096int dma_map_create_descriptor_ring(DMA_Device_t dev,    /* DMA device (where the ring is stored) */
2097                                   DMA_MemMap_t *memMap,        /* Memory map that will be used */
2098                                   dma_addr_t devPhysAddr       /* Physical address of device */
2099    ) {
2100        int rc;
2101        int numDescriptors;
2102        DMA_DeviceAttribute_t *devAttr;
2103        DMA_Region_t *region;
2104        DMA_Segment_t *segment;
2105        dma_addr_t srcPhysAddr;
2106        dma_addr_t dstPhysAddr;
2107        int regionIdx;
2108        int segmentIdx;
2109
2110        devAttr = &DMA_gDeviceAttribute[dev];
2111
2112        down(&memMap->lock);
2113
2114        /* Figure out how many descriptors we need */
2115
2116        numDescriptors = 0;
2117        for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2118                region = &memMap->region[regionIdx];
2119
2120                for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2121                     segmentIdx++) {
2122                        segment = &region->segment[segmentIdx];
2123
2124                        if (memMap->dir == DMA_TO_DEVICE) {
2125                                srcPhysAddr = segment->physAddr;
2126                                dstPhysAddr = devPhysAddr;
2127                        } else {
2128                                srcPhysAddr = devPhysAddr;
2129                                dstPhysAddr = segment->physAddr;
2130                        }
2131
                        rc = dma_calculate_descriptor_count(dev, srcPhysAddr,
                                                            dstPhysAddr,
                                                            segment->numBytes);
2137                        if (rc < 0) {
2138                                printk(KERN_ERR
2139                                       "%s: dma_calculate_descriptor_count failed: %d\n",
2140                                       __func__, rc);
2141                                goto out;
2142                        }
2143                        numDescriptors += rc;
2144                }
2145        }
2146
2147        /* Adjust the size of the ring, if it isn't big enough */
2148
2149        if (numDescriptors > devAttr->ring.descriptorsAllocated) {
2150                dma_free_descriptor_ring(&devAttr->ring);
2151                rc =
2152                     dma_alloc_descriptor_ring(&devAttr->ring,
2153                                               numDescriptors);
2154                if (rc < 0) {
2155                        printk(KERN_ERR
2156                               "%s: dma_alloc_descriptor_ring failed: %d\n",
2157                               __func__, rc);
2158                        goto out;
2159                }
2160        } else {
2161                rc =
2162                     dma_init_descriptor_ring(&devAttr->ring,
2163                                              numDescriptors);
2164                if (rc < 0) {
2165                        printk(KERN_ERR
2166                               "%s: dma_init_descriptor_ring failed: %d\n",
2167                               __func__, rc);
2168                        goto out;
2169                }
2170        }
2171
2172        /* Populate the descriptors */
2173
2174        for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2175                region = &memMap->region[regionIdx];
2176
2177                for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2178                     segmentIdx++) {
2179                        segment = &region->segment[segmentIdx];
2180
2181                        if (memMap->dir == DMA_TO_DEVICE) {
2182                                srcPhysAddr = segment->physAddr;
2183                                dstPhysAddr = devPhysAddr;
2184                        } else {
2185                                srcPhysAddr = devPhysAddr;
2186                                dstPhysAddr = segment->physAddr;
2187                        }
2188
2189                        rc =
2190                             dma_add_descriptors(&devAttr->ring, dev,
2191                                                 srcPhysAddr, dstPhysAddr,
2192                                                 segment->numBytes);
2193                        if (rc < 0) {
2194                                printk(KERN_ERR
2195                                       "%s: dma_add_descriptors failed: %d\n",
2196                                       __func__, rc);
2197                                goto out;
2198                        }
2199                }
2200        }
2201
2202        rc = 0;
2203
2204out:
2205
2206        up(&memMap->lock);
2207        return rc;
2208}
2209
2210EXPORT_SYMBOL(dma_map_create_descriptor_ring);
2211
2212/****************************************************************************/
2213/**
*   Unmaps a memory region previously set up with dma_map_start() /
*   dma_map_mem(), releasing any user pages that were locked.
*
*   @return     0 on success, error code otherwise.
2217*/
2218/****************************************************************************/
2219
2220int dma_unmap(DMA_MemMap_t *memMap,     /* Stores state information about the map */
2221              int dirtied       /* non-zero if any of the pages were modified */
    ) {
        int rc = 0;
        int regionIdx;
        int segmentIdx;
        DMA_Region_t *region;
        DMA_Segment_t *segment;

        down(&memMap->lock);    /* Balances the up() on the way out */

        for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
2229                region = &memMap->region[regionIdx];
2230
2231                for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
2232                     segmentIdx++) {
2233                        segment = &region->segment[segmentIdx];
2234
2235                        switch (region->memType) {
2236                        case DMA_MEM_TYPE_VMALLOC:
2237                                {
                                        printk(KERN_ERR
                                               "%s: vmalloc'd pages are not yet supported\n",
                                               __func__);
                                        rc = -EINVAL;
                                        goto out;
2242                                }
2243
2244                        case DMA_MEM_TYPE_KMALLOC:
2245                                {
2246#if ALLOW_MAP_OF_KMALLOC_MEMORY
2247                                        dma_unmap_single(NULL,
2248                                                         segment->physAddr,
2249                                                         segment->numBytes,
2250                                                         memMap->dir);
2251#endif
2252                                        break;
2253                                }
2254
2255                        case DMA_MEM_TYPE_DMA:
2256                                {
                                        dma_sync_single_for_cpu(NULL,
                                                                segment->physAddr,
                                                                segment->numBytes,
                                                                memMap->dir);
2263                                        break;
2264                                }
2265
2266                        case DMA_MEM_TYPE_USER:
2267                                {
2268                                        /* Nothing to do here. */
2269
2270                                        break;
2271                                }
2272
2273                        default:
2274                                {
                                        printk(KERN_ERR
                                               "%s: Unsupported memory type: %d\n",
                                               __func__, region->memType);
                                        rc = -EINVAL;
                                        goto out;
2279                                }
2280                        }
2281
2282                        segment->virtAddr = NULL;
2283                        segment->physAddr = 0;
2284                        segment->numBytes = 0;
2285                }
2286
2287                if (region->numLockedPages > 0) {
2288                        int pageIdx;
2289
2290                        /* Some user pages were locked. We need to go and unlock them now. */
2291
2292                        for (pageIdx = 0; pageIdx < region->numLockedPages;
2293                             pageIdx++) {
2294                                struct page *page =
2295                                    region->lockedPages[pageIdx];
2296
                                if (dirtied || memMap->dir == DMA_FROM_DEVICE) {
                                        SetPageDirty(page);
                                }
2300                                page_cache_release(page);
2301                        }
2302                        kfree(region->lockedPages);
2303                        region->numLockedPages = 0;
2304                        region->lockedPages = NULL;
2305                }
2306
2307                region->memType = DMA_MEM_TYPE_NONE;
2308                region->virtAddr = NULL;
2309                region->numBytes = 0;
2310                region->numSegmentsUsed = 0;
2311        }
        memMap->userTask = NULL;
        memMap->numRegionsUsed = 0;
        memMap->inUse = 0;

out:
        up(&memMap->lock);

        return rc;
2319}
2320
2321EXPORT_SYMBOL(dma_unmap);
2322