linux/arch/arm/plat-omap/dma.c
   1/*
   2 * linux/arch/arm/plat-omap/dma.c
   3 *
   4 * Copyright (C) 2003 - 2008 Nokia Corporation
   5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
   6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
   7 * Graphics DMA and LCD DMA graphics transformations
   8 * by Imre Deak <imre.deak@nokia.com>
   9 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
  10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
  11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
  12 *
  13 * Copyright (C) 2009 Texas Instruments
  14 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  15 *
  16 * Support functions for the OMAP internal DMA channels.
  17 *
  18 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  19 * Converted DMA library into DMA platform driver.
  20 *      - G, Manjunath Kondaiah <manjugk@ti.com>
  21 *
  22 * This program is free software; you can redistribute it and/or modify
  23 * it under the terms of the GNU General Public License version 2 as
  24 * published by the Free Software Foundation.
  25 *
  26 */
  27
  28#include <linux/module.h>
  29#include <linux/init.h>
  30#include <linux/sched.h>
  31#include <linux/spinlock.h>
  32#include <linux/errno.h>
  33#include <linux/interrupt.h>
  34#include <linux/irq.h>
  35#include <linux/io.h>
  36#include <linux/slab.h>
  37#include <linux/delay.h>
  38
  39#include <asm/system.h>
  40#include <mach/hardware.h>
  41#include <plat/dma.h>
  42
  43#include <plat/tc.h>
  44
  45#undef DEBUG
  46
  47#ifndef CONFIG_ARCH_OMAP1
  48enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
  49        DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
  50};
  51
  52enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
  53#endif
  54
  55#define OMAP_DMA_ACTIVE                 0x01
  56#define OMAP2_DMA_CSR_CLEAR_MASK        0xffffffff
  57
  58#define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
  59
  60static struct omap_system_dma_plat_info *p;
  61static struct omap_dma_dev_attr *d;
  62
  63static int enable_1510_mode;
  64static u32 errata;
  65
  66static struct omap_dma_global_context_registers {
  67        u32 dma_irqenable_l0;
  68        u32 dma_ocp_sysconfig;
  69        u32 dma_gcr;
  70} omap_dma_global_context;
  71
  72struct dma_link_info {
  73        int *linked_dmach_q;
  74        int no_of_lchs_linked;
  75
  76        int q_count;
  77        int q_tail;
  78        int q_head;
  79
  80        int chain_state;
  81        int chain_mode;
  82
  83};
  84
  85static struct dma_link_info *dma_linked_lch;
  86
  87#ifndef CONFIG_ARCH_OMAP1
  88
  89/* Chain handling macros */
  90#define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
  91        do {                                                            \
  92                dma_linked_lch[chain_id].q_head =                       \
  93                dma_linked_lch[chain_id].q_tail =                       \
  94                dma_linked_lch[chain_id].q_count = 0;                   \
  95        } while (0)
  96#define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
  97                (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
  98                dma_linked_lch[chain_id].q_count)
  99#define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
 100        (                                                               \
 101                ((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==    \
 102                dma_linked_lch[chain_id].q_count)                       \
 103        )
 104#define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
 105                (0 == dma_linked_lch[chain_id].q_count)
 106#define __OMAP_DMA_CHAIN_INCQ(end)                                      \
 107        ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
 108#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
 109        do {                                                            \
 110                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
 111                dma_linked_lch[chain_id].q_count--;                     \
 112        } while (0)
 113
 114#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
 115        do {                                                            \
 116                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
 117                dma_linked_lch[chain_id].q_count++; \
 118        } while (0)
 119#endif
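
    /*
     * Illustrative note (editorial, not kernel code): the macros above
     * implement a small circular queue over the channels linked into a
     * chain.  For a chain of three channels the indices evolve as:
     *
     *      OMAP_DMA_CHAIN_QINIT(id);       q_head = q_tail = q_count = 0
     *      OMAP_DMA_CHAIN_INCQTAIL(id);    q_tail = 1, q_count = 1
     *      OMAP_DMA_CHAIN_INCQTAIL(id);    q_tail = 2, q_count = 2
     *      OMAP_DMA_CHAIN_INCQTAIL(id);    q_tail = 0, q_count = 3 (QFULL)
     *      OMAP_DMA_CHAIN_INCQHEAD(id);    q_head = 1, q_count = 2
     *
     * QFULL compares q_count against no_of_lchs_linked and QEMPTY against
     * zero, so the queue holds at most one entry per linked channel.
     */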
 120
 121static int dma_lch_count;
 122static int dma_chan_count;
 123static int omap_dma_reserve_channels;
 124
 125static spinlock_t dma_chan_lock;
 126static struct omap_dma_lch *dma_chan;
 127
 128static inline void disable_lnk(int lch);
 129static void omap_disable_channel_irq(int lch);
 130static inline void omap_enable_channel_irq(int lch);
 131
 132#define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
 133                                                __func__);
 134
 135#ifdef CONFIG_ARCH_OMAP15XX
 136/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
 137int omap_dma_in_1510_mode(void)
 138{
 139        return enable_1510_mode;
 140}
 141#else
 142#define omap_dma_in_1510_mode()         0
 143#endif
 144
 145#ifdef CONFIG_ARCH_OMAP1
 146static inline int get_gdma_dev(int req)
 147{
 148        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
 149        int shift = ((req - 1) % 5) * 6;
 150
 151        return ((omap_readl(reg) >> shift) & 0x3f) + 1;
 152}
 153
 154static inline void set_gdma_dev(int req, int dev)
 155{
 156        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
 157        int shift = ((req - 1) % 5) * 6;
 158        u32 l;
 159
 160        l = omap_readl(reg);
 161        l &= ~(0x3f << shift);
 162        l |= (dev - 1) << shift;
 163        omap_writel(l, reg);
 164}
 165#else
 166#define set_gdma_dev(req, dev)  do {} while (0)
 167#endif
 168
 169void omap_set_dma_priority(int lch, int dst_port, int priority)
 170{
 171        unsigned long reg;
 172        u32 l;
 173
 174        if (cpu_class_is_omap1()) {
 175                switch (dst_port) {
 176                case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
 177                        reg = OMAP_TC_OCPT1_PRIOR;
 178                        break;
 179                case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
 180                        reg = OMAP_TC_OCPT2_PRIOR;
 181                        break;
 182                case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
 183                        reg = OMAP_TC_EMIFF_PRIOR;
 184                        break;
 185                case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
 186                        reg = OMAP_TC_EMIFS_PRIOR;
 187                        break;
 188                default:
 189                        BUG();
 190                        return;
 191                }
 192                l = omap_readl(reg);
 193                l &= ~(0xf << 8);
 194                l |= (priority & 0xf) << 8;
 195                omap_writel(l, reg);
 196        }
 197
 198        if (cpu_class_is_omap2()) {
 199                u32 ccr;
 200
 201                ccr = p->dma_read(CCR, lch);
 202                if (priority)
 203                        ccr |= (1 << 6);
 204                else
 205                        ccr &= ~(1 << 6);
 206                p->dma_write(ccr, CCR, lch);
 207        }
 208}
 209EXPORT_SYMBOL(omap_set_dma_priority);
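
    /*
     * Illustrative usage (hypothetical channel variable 'lch'): on OMAP2+
     * the dst_port argument is ignored and a non-zero priority simply sets
     * the high-priority bit in the channel's CCR, so a driver would do:
     *
     *      omap_set_dma_priority(lch, 0, 1);
     *
     * On OMAP1 the dst_port argument selects which TC port priority
     * register (OCP_T1/T2, EMIFF, EMIFS) is written instead.
     */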
 210
 211void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
 212                                  int frame_count, int sync_mode,
 213                                  int dma_trigger, int src_or_dst_synch)
 214{
 215        u32 l;
 216
 217        l = p->dma_read(CSDP, lch);
 218        l &= ~0x03;
 219        l |= data_type;
 220        p->dma_write(l, CSDP, lch);
 221
 222        if (cpu_class_is_omap1()) {
 223                u16 ccr;
 224
 225                ccr = p->dma_read(CCR, lch);
 226                ccr &= ~(1 << 5);
 227                if (sync_mode == OMAP_DMA_SYNC_FRAME)
 228                        ccr |= 1 << 5;
 229                p->dma_write(ccr, CCR, lch);
 230
 231                ccr = p->dma_read(CCR2, lch);
 232                ccr &= ~(1 << 2);
 233                if (sync_mode == OMAP_DMA_SYNC_BLOCK)
 234                        ccr |= 1 << 2;
 235                p->dma_write(ccr, CCR2, lch);
 236        }
 237
 238        if (cpu_class_is_omap2() && dma_trigger) {
 239                u32 val;
 240
 241                val = p->dma_read(CCR, lch);
 242
 243                /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
 244                val &= ~((1 << 23) | (3 << 19) | 0x1f);
 245                val |= (dma_trigger & ~0x1f) << 14;
 246                val |= dma_trigger & 0x1f;
 247
 248                if (sync_mode & OMAP_DMA_SYNC_FRAME)
 249                        val |= 1 << 5;
 250                else
 251                        val &= ~(1 << 5);
 252
 253                if (sync_mode & OMAP_DMA_SYNC_BLOCK)
 254                        val |= 1 << 18;
 255                else
 256                        val &= ~(1 << 18);
 257
 258                if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
 259                        val &= ~(1 << 24);      /* dest synch */
 260                        val |= (1 << 23);       /* Prefetch */
 261                } else if (src_or_dst_synch) {
 262                        val |= 1 << 24;         /* source synch */
 263                } else {
 264                        val &= ~(1 << 24);      /* dest synch */
 265                }
 266                p->dma_write(val, CCR, lch);
 267        }
 268
 269        p->dma_write(elem_count, CEN, lch);
 270        p->dma_write(frame_count, CFN, lch);
 271}
 272EXPORT_SYMBOL(omap_set_dma_transfer_params);
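
    /*
     * Illustrative usage sketch (hypothetical client code, not part of this
     * file): configure a frame-synchronized transfer of 512 32-bit elements
     * per frame, 8 frames, destination-synchronized on an assumed request
     * line 'dev_dma_req':
     *
     *      omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
     *                                   512, 8, OMAP_DMA_SYNC_FRAME,
     *                                   dev_dma_req, OMAP_DMA_DST_SYNC);
     *
     * OMAP_DMA_DATA_TYPE_S32, OMAP_DMA_SYNC_FRAME and OMAP_DMA_DST_SYNC are
     * assumed to come from <plat/dma.h>; the addresses are programmed
     * separately with omap_set_dma_src_params()/omap_set_dma_dest_params().
     */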
 273
 274void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
 275{
 276        BUG_ON(omap_dma_in_1510_mode());
 277
 278        if (cpu_class_is_omap1()) {
 279                u16 w;
 280
 281                w = p->dma_read(CCR2, lch);
 282                w &= ~0x03;
 283
 284                switch (mode) {
 285                case OMAP_DMA_CONSTANT_FILL:
 286                        w |= 0x01;
 287                        break;
 288                case OMAP_DMA_TRANSPARENT_COPY:
 289                        w |= 0x02;
 290                        break;
 291                case OMAP_DMA_COLOR_DIS:
 292                        break;
 293                default:
 294                        BUG();
 295                }
 296                p->dma_write(w, CCR2, lch);
 297
 298                w = p->dma_read(LCH_CTRL, lch);
 299                w &= ~0x0f;
 300                /* Default is channel type 2D */
 301                if (mode) {
 302                        p->dma_write(color, COLOR, lch);
 303                        w |= 1;         /* Channel type G */
 304                }
 305                p->dma_write(w, LCH_CTRL, lch);
 306        }
 307
 308        if (cpu_class_is_omap2()) {
 309                u32 val;
 310
 311                val = p->dma_read(CCR, lch);
 312                val &= ~((1 << 17) | (1 << 16));
 313
 314                switch (mode) {
 315                case OMAP_DMA_CONSTANT_FILL:
 316                        val |= 1 << 16;
 317                        break;
 318                case OMAP_DMA_TRANSPARENT_COPY:
 319                        val |= 1 << 17;
 320                        break;
 321                case OMAP_DMA_COLOR_DIS:
 322                        break;
 323                default:
 324                        BUG();
 325                }
 326                p->dma_write(val, CCR, lch);
 327
 328                color &= 0xffffff;
 329                p->dma_write(color, COLOR, lch);
 330        }
 331}
 332EXPORT_SYMBOL(omap_set_dma_color_mode);
 333
 334void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
 335{
 336        if (cpu_class_is_omap2()) {
 337                u32 csdp;
 338
 339                csdp = p->dma_read(CSDP, lch);
 340                csdp &= ~(0x3 << 16);
 341                csdp |= (mode << 16);
 342                p->dma_write(csdp, CSDP, lch);
 343        }
 344}
 345EXPORT_SYMBOL(omap_set_dma_write_mode);
 346
 347void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
 348{
 349        if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
 350                u32 l;
 351
 352                l = p->dma_read(LCH_CTRL, lch);
 353                l &= ~0x7;
 354                l |= mode;
 355                p->dma_write(l, LCH_CTRL, lch);
 356        }
 357}
 358EXPORT_SYMBOL(omap_set_dma_channel_mode);
 359
 360/* Note that src_port is only for omap1 */
 361void omap_set_dma_src_params(int lch, int src_port, int src_amode,
 362                             unsigned long src_start,
 363                             int src_ei, int src_fi)
 364{
 365        u32 l;
 366
 367        if (cpu_class_is_omap1()) {
 368                u16 w;
 369
 370                w = p->dma_read(CSDP, lch);
 371                w &= ~(0x1f << 2);
 372                w |= src_port << 2;
 373                p->dma_write(w, CSDP, lch);
 374        }
 375
 376        l = p->dma_read(CCR, lch);
 377        l &= ~(0x03 << 12);
 378        l |= src_amode << 12;
 379        p->dma_write(l, CCR, lch);
 380
 381        p->dma_write(src_start, CSSA, lch);
 382
 383        p->dma_write(src_ei, CSEI, lch);
 384        p->dma_write(src_fi, CSFI, lch);
 385}
 386EXPORT_SYMBOL(omap_set_dma_src_params);
 387
 388void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
 389{
 390        omap_set_dma_transfer_params(lch, params->data_type,
 391                                     params->elem_count, params->frame_count,
 392                                     params->sync_mode, params->trigger,
 393                                     params->src_or_dst_synch);
 394        omap_set_dma_src_params(lch, params->src_port,
 395                                params->src_amode, params->src_start,
 396                                params->src_ei, params->src_fi);
 397
 398        omap_set_dma_dest_params(lch, params->dst_port,
 399                                 params->dst_amode, params->dst_start,
 400                                 params->dst_ei, params->dst_fi);
 401        if (params->read_prio || params->write_prio)
 402                omap_dma_set_prio_lch(lch, params->read_prio,
 403                                      params->write_prio);
 404}
 405EXPORT_SYMBOL(omap_set_dma_params);
 406
 407void omap_set_dma_src_index(int lch, int eidx, int fidx)
 408{
 409        if (cpu_class_is_omap2())
 410                return;
 411
 412        p->dma_write(eidx, CSEI, lch);
 413        p->dma_write(fidx, CSFI, lch);
 414}
 415EXPORT_SYMBOL(omap_set_dma_src_index);
 416
 417void omap_set_dma_src_data_pack(int lch, int enable)
 418{
 419        u32 l;
 420
 421        l = p->dma_read(CSDP, lch);
 422        l &= ~(1 << 6);
 423        if (enable)
 424                l |= (1 << 6);
 425        p->dma_write(l, CSDP, lch);
 426}
 427EXPORT_SYMBOL(omap_set_dma_src_data_pack);
 428
 429void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 430{
 431        unsigned int burst = 0;
 432        u32 l;
 433
 434        l = p->dma_read(CSDP, lch);
 435        l &= ~(0x03 << 7);
 436
 437        switch (burst_mode) {
 438        case OMAP_DMA_DATA_BURST_DIS:
 439                break;
 440        case OMAP_DMA_DATA_BURST_4:
 441                if (cpu_class_is_omap2())
 442                        burst = 0x1;
 443                else
 444                        burst = 0x2;
 445                break;
 446        case OMAP_DMA_DATA_BURST_8:
 447                if (cpu_class_is_omap2()) {
 448                        burst = 0x2;
 449                        break;
 450                }
 451                /*
 452                 * not supported by current hardware on OMAP1
 453                 * w |= (0x03 << 7);
 454                 * fall through
 455                 */
 456        case OMAP_DMA_DATA_BURST_16:
 457                if (cpu_class_is_omap2()) {
 458                        burst = 0x3;
 459                        break;
 460                }
 461                /*
 462                 * OMAP1 doesn't support burst 16
 463                 * fall through
 464                 */
 465        default:
 466                BUG();
 467        }
 468
 469        l |= (burst << 7);
 470        p->dma_write(l, CSDP, lch);
 471}
 472EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
 473
 474/* Note that dest_port is only for OMAP1 */
 475void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
 476                              unsigned long dest_start,
 477                              int dst_ei, int dst_fi)
 478{
 479        u32 l;
 480
 481        if (cpu_class_is_omap1()) {
 482                l = p->dma_read(CSDP, lch);
 483                l &= ~(0x1f << 9);
 484                l |= dest_port << 9;
 485                p->dma_write(l, CSDP, lch);
 486        }
 487
 488        l = p->dma_read(CCR, lch);
 489        l &= ~(0x03 << 14);
 490        l |= dest_amode << 14;
 491        p->dma_write(l, CCR, lch);
 492
 493        p->dma_write(dest_start, CDSA, lch);
 494
 495        p->dma_write(dst_ei, CDEI, lch);
 496        p->dma_write(dst_fi, CDFI, lch);
 497}
 498EXPORT_SYMBOL(omap_set_dma_dest_params);
 499
 500void omap_set_dma_dest_index(int lch, int eidx, int fidx)
 501{
 502        if (cpu_class_is_omap2())
 503                return;
 504
 505        p->dma_write(eidx, CDEI, lch);
 506        p->dma_write(fidx, CDFI, lch);
 507}
 508EXPORT_SYMBOL(omap_set_dma_dest_index);
 509
 510void omap_set_dma_dest_data_pack(int lch, int enable)
 511{
 512        u32 l;
 513
 514        l = p->dma_read(CSDP, lch);
 515        l &= ~(1 << 13);
 516        if (enable)
 517                l |= 1 << 13;
 518        p->dma_write(l, CSDP, lch);
 519}
 520EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
 521
 522void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 523{
 524        unsigned int burst = 0;
 525        u32 l;
 526
 527        l = p->dma_read(CSDP, lch);
 528        l &= ~(0x03 << 14);
 529
 530        switch (burst_mode) {
 531        case OMAP_DMA_DATA_BURST_DIS:
 532                break;
 533        case OMAP_DMA_DATA_BURST_4:
 534                if (cpu_class_is_omap2())
 535                        burst = 0x1;
 536                else
 537                        burst = 0x2;
 538                break;
 539        case OMAP_DMA_DATA_BURST_8:
 540                if (cpu_class_is_omap2())
 541                        burst = 0x2;
 542                else
 543                        burst = 0x3;
 544                break;
 545        case OMAP_DMA_DATA_BURST_16:
 546                if (cpu_class_is_omap2()) {
 547                        burst = 0x3;
 548                        break;
 549                }
 550                /*
 551                 * OMAP1 doesn't support burst 16
 552                 * fall through
 553                 */
 554        default:
 555                printk(KERN_ERR "Invalid DMA burst mode\n");
 556                BUG();
 557                return;
 558        }
 559        l |= (burst << 14);
 560        p->dma_write(l, CSDP, lch);
 561}
 562EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
 563
 564static inline void omap_enable_channel_irq(int lch)
 565{
 566        u32 status;
 567
 568        /* Clear CSR */
 569        if (cpu_class_is_omap1())
 570                status = p->dma_read(CSR, lch);
 571        else if (cpu_class_is_omap2())
 572                p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 573
 574        /* Enable some nice interrupts. */
 575        p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
 576}
 577
 578static void omap_disable_channel_irq(int lch)
 579{
 580        if (cpu_class_is_omap2())
 581                p->dma_write(0, CICR, lch);
 582}
 583
 584void omap_enable_dma_irq(int lch, u16 bits)
 585{
 586        dma_chan[lch].enabled_irqs |= bits;
 587}
 588EXPORT_SYMBOL(omap_enable_dma_irq);
 589
 590void omap_disable_dma_irq(int lch, u16 bits)
 591{
 592        dma_chan[lch].enabled_irqs &= ~bits;
 593}
 594EXPORT_SYMBOL(omap_disable_dma_irq);
 595
 596static inline void enable_lnk(int lch)
 597{
 598        u32 l;
 599
 600        l = p->dma_read(CLNK_CTRL, lch);
 601
 602        if (cpu_class_is_omap1())
 603                l &= ~(1 << 14);
 604
 605        /* Set the ENABLE_LNK bits */
 606        if (dma_chan[lch].next_lch != -1)
 607                l = dma_chan[lch].next_lch | (1 << 15);
 608
 609#ifndef CONFIG_ARCH_OMAP1
 610        if (cpu_class_is_omap2())
 611                if (dma_chan[lch].next_linked_ch != -1)
 612                        l = dma_chan[lch].next_linked_ch | (1 << 15);
 613#endif
 614
 615        p->dma_write(l, CLNK_CTRL, lch);
 616}
 617
 618static inline void disable_lnk(int lch)
 619{
 620        u32 l;
 621
 622        l = p->dma_read(CLNK_CTRL, lch);
 623
 624        /* Disable interrupts */
 625        if (cpu_class_is_omap1()) {
 626                p->dma_write(0, CICR, lch);
 627                /* Set the STOP_LNK bit */
 628                l |= 1 << 14;
 629        }
 630
 631        if (cpu_class_is_omap2()) {
 632                omap_disable_channel_irq(lch);
 633                /* Clear the ENABLE_LNK bit */
 634                l &= ~(1 << 15);
 635        }
 636
 637        p->dma_write(l, CLNK_CTRL, lch);
 638        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 639}
 640
 641static inline void omap2_enable_irq_lch(int lch)
 642{
 643        u32 val;
 644        unsigned long flags;
 645
 646        if (!cpu_class_is_omap2())
 647                return;
 648
 649        spin_lock_irqsave(&dma_chan_lock, flags);
 650        val = p->dma_read(IRQENABLE_L0, lch);
 651        val |= 1 << lch;
 652        p->dma_write(val, IRQENABLE_L0, lch);
 653        spin_unlock_irqrestore(&dma_chan_lock, flags);
 654}
 655
 656static inline void omap2_disable_irq_lch(int lch)
 657{
 658        u32 val;
 659        unsigned long flags;
 660
 661        if (!cpu_class_is_omap2())
 662                return;
 663
 664        spin_lock_irqsave(&dma_chan_lock, flags);
 665        val = p->dma_read(IRQENABLE_L0, lch);
 666        val &= ~(1 << lch);
 667        p->dma_write(val, IRQENABLE_L0, lch);
 668        spin_unlock_irqrestore(&dma_chan_lock, flags);
 669}
 670
 671int omap_request_dma(int dev_id, const char *dev_name,
 672                     void (*callback)(int lch, u16 ch_status, void *data),
 673                     void *data, int *dma_ch_out)
 674{
 675        int ch, free_ch = -1;
 676        unsigned long flags;
 677        struct omap_dma_lch *chan;
 678
 679        spin_lock_irqsave(&dma_chan_lock, flags);
 680        for (ch = 0; ch < dma_chan_count; ch++) {
 681                if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
 682                        free_ch = ch;
 683                        if (dev_id == 0)
 684                                break;
 685                }
 686        }
 687        if (free_ch == -1) {
 688                spin_unlock_irqrestore(&dma_chan_lock, flags);
 689                return -EBUSY;
 690        }
 691        chan = dma_chan + free_ch;
 692        chan->dev_id = dev_id;
 693
 694        if (p->clear_lch_regs)
 695                p->clear_lch_regs(free_ch);
 696
 697        if (cpu_class_is_omap2())
 698                omap_clear_dma(free_ch);
 699
 700        spin_unlock_irqrestore(&dma_chan_lock, flags);
 701
 702        chan->dev_name = dev_name;
 703        chan->callback = callback;
 704        chan->data = data;
 705        chan->flags = 0;
 706
 707#ifndef CONFIG_ARCH_OMAP1
 708        if (cpu_class_is_omap2()) {
 709                chan->chain_id = -1;
 710                chan->next_linked_ch = -1;
 711        }
 712#endif
 713
 714        chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
 715
 716        if (cpu_class_is_omap1())
 717                chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
 718        else if (cpu_class_is_omap2())
 719                chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
 720                        OMAP2_DMA_TRANS_ERR_IRQ;
 721
 722        if (cpu_is_omap16xx()) {
 723                /* If the sync device is set, configure it dynamically. */
 724                if (dev_id != 0) {
 725                        set_gdma_dev(free_ch + 1, dev_id);
 726                        dev_id = free_ch + 1;
 727                }
 728                /*
 729                 * Disable the 1510 compatibility mode and set the sync device
 730                 * id.
 731                 */
 732                p->dma_write(dev_id | (1 << 10), CCR, free_ch);
 733        } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
 734                p->dma_write(dev_id, CCR, free_ch);
 735        }
 736
 737        if (cpu_class_is_omap2()) {
 738                omap2_enable_irq_lch(free_ch);
 739                omap_enable_channel_irq(free_ch);
 740                /* Clear the CSR register and IRQ status register */
 741                p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
 742                p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
 743        }
 744
 745        *dma_ch_out = free_ch;
 746
 747        return 0;
 748}
 749EXPORT_SYMBOL(omap_request_dma);
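
    /*
     * Illustrative usage sketch (hypothetical client driver, not part of
     * this file): the usual channel lifetime is request, configure, start,
     * wait for the completion callback, then free.  MY_DMA_REQ, the
     * physical buffer addresses and the completion are assumptions made
     * only for the example.
     *
     *      static void my_dma_cb(int lch, u16 ch_status, void *data)
     *      {
     *              complete(data);
     *      }
     *
     *      int lch;
     *
     *      if (omap_request_dma(MY_DMA_REQ, "my-dev", my_dma_cb,
     *                           &my_done, &lch) == 0) {
     *              omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
     *                              count, 1, OMAP_DMA_SYNC_ELEMENT,
     *                              MY_DMA_REQ, OMAP_DMA_DST_SYNC);
     *              omap_set_dma_src_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
     *                              src_phys, 0, 0);
     *              omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_CONSTANT,
     *                              dst_phys, 0, 0);
     *              omap_start_dma(lch);
     *              wait_for_completion(&my_done);
     *              omap_free_dma(lch);
     *      }
     */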
 750
 751void omap_free_dma(int lch)
 752{
 753        unsigned long flags;
 754
 755        if (dma_chan[lch].dev_id == -1) {
 756                pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
 757                       lch);
 758                return;
 759        }
 760
 761        if (cpu_class_is_omap1()) {
 762                /* Disable all DMA interrupts for the channel. */
 763                p->dma_write(0, CICR, lch);
 764                /* Make sure the DMA transfer is stopped. */
 765                p->dma_write(0, CCR, lch);
 766        }
 767
 768        if (cpu_class_is_omap2()) {
 769                omap2_disable_irq_lch(lch);
 770
 771                /* Clear the CSR register and IRQ status register */
 772                p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 773                p->dma_write(1 << lch, IRQSTATUS_L0, lch);
 774
 775                /* Disable all DMA interrupts for the channel. */
 776                p->dma_write(0, CICR, lch);
 777
 778                /* Make sure the DMA transfer is stopped. */
 779                p->dma_write(0, CCR, lch);
 780                omap_clear_dma(lch);
 781        }
 782
 783        spin_lock_irqsave(&dma_chan_lock, flags);
 784        dma_chan[lch].dev_id = -1;
 785        dma_chan[lch].next_lch = -1;
 786        dma_chan[lch].callback = NULL;
 787        spin_unlock_irqrestore(&dma_chan_lock, flags);
 788}
 789EXPORT_SYMBOL(omap_free_dma);
 790
 791/**
 792 * @brief omap_dma_set_global_params : Set global priority settings for dma
 793 *
 794 * @param arb_rate
 795 * @param max_fifo_depth
 796 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
 797 *                                                 DMA_THREAD_RESERVE_ONET
 798 *                                                 DMA_THREAD_RESERVE_TWOT
 799 *                                                 DMA_THREAD_RESERVE_THREET
 800 */
 801void
 802omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
 803{
 804        u32 reg;
 805
 806        if (!cpu_class_is_omap2()) {
 807                printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
 808                return;
 809        }
 810
 811        if (max_fifo_depth == 0)
 812                max_fifo_depth = 1;
 813        if (arb_rate == 0)
 814                arb_rate = 1;
 815
 816        reg = 0xff & max_fifo_depth;
 817        reg |= (0x3 & tparams) << 12;
 818        reg |= (arb_rate & 0xff) << 16;
 819
 820        p->dma_write(reg, GCR, 0);
 821}
 822EXPORT_SYMBOL(omap_dma_set_global_params);
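
    /*
     * Illustrative call (hypothetical values): an arbitration rate of 4, a
     * maximum FIFO depth of 16 and the default thread reservation, using
     * the DMA_THREAD_RESERVE_NORM constant named in the comment above:
     *
     *      omap_dma_set_global_params(4, 16, DMA_THREAD_RESERVE_NORM);
     */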
 823
 824/**
 825 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 826 *
 827 * @param lch
 828 * @param read_prio - Read priority
 829 * @param write_prio - Write priority
 830 * Both of the above can be set with one of the following values :
 831 *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 832 */
 833int
 834omap_dma_set_prio_lch(int lch, unsigned char read_prio,
 835                      unsigned char write_prio)
 836{
 837        u32 l;
 838
 839        if (unlikely((lch < 0 || lch >= dma_lch_count))) {
 840                printk(KERN_ERR "Invalid channel id\n");
 841                return -EINVAL;
 842        }
 843        l = p->dma_read(CCR, lch);
 844        l &= ~((1 << 6) | (1 << 26));
 845        if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
 846                l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
 847        else
 848                l |= ((read_prio & 0x1) << 6);
 849
 850        p->dma_write(l, CCR, lch);
 851
 852        return 0;
 853}
 854EXPORT_SYMBOL(omap_dma_set_prio_lch);
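
    /*
     * Illustrative call (hypothetical channel variable): give channel 'lch'
     * a high read priority and a low write priority, using the constants
     * named in the comment above:
     *
     *      omap_dma_set_prio_lch(lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_LOW);
     */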
 855
 856/*
 857 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 858 * through omap_start_dma(). Any buffers in flight are discarded.
 859 */
 860void omap_clear_dma(int lch)
 861{
 862        unsigned long flags;
 863
 864        local_irq_save(flags);
 865        p->clear_dma(lch);
 866        local_irq_restore(flags);
 867}
 868EXPORT_SYMBOL(omap_clear_dma);
 869
 870void omap_start_dma(int lch)
 871{
 872        u32 l;
 873
 874        /*
 875         * The CPC/CDAC register needs to be initialized to zero
 876         * before starting dma transfer.
 877         */
 878        if (cpu_is_omap15xx())
 879                p->dma_write(0, CPC, lch);
 880        else
 881                p->dma_write(0, CDAC, lch);
 882
 883        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 884                int next_lch, cur_lch;
 885                char dma_chan_link_map[dma_lch_count];
 886
 887                /* Set the link register of the first channel */
 888                enable_lnk(lch);
 889
 890                memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
 891                dma_chan_link_map[lch] = 1;
 892                cur_lch = dma_chan[lch].next_lch;
 893                do {
 894                        next_lch = dma_chan[cur_lch].next_lch;
 895
 896                        /* The loop case: we've been here already */
 897                        if (dma_chan_link_map[cur_lch])
 898                                break;
 899                        /* Mark the current channel */
 900                        dma_chan_link_map[cur_lch] = 1;
 901
 902                        enable_lnk(cur_lch);
 903                        omap_enable_channel_irq(cur_lch);
 904
 905                        cur_lch = next_lch;
 906                } while (next_lch != -1);
 907        } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
 908                p->dma_write(lch, CLNK_CTRL, lch);
 909
 910        omap_enable_channel_irq(lch);
 911
 912        l = p->dma_read(CCR, lch);
 913
 914        if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
 915                        l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 916        l |= OMAP_DMA_CCR_EN;
 917
 918        p->dma_write(l, CCR, lch);
 919
 920        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 921}
 922EXPORT_SYMBOL(omap_start_dma);
 923
 924void omap_stop_dma(int lch)
 925{
 926        u32 l;
 927
 928        /* Disable all interrupts on the channel */
 929        if (cpu_class_is_omap1())
 930                p->dma_write(0, CICR, lch);
 931
 932        l = p->dma_read(CCR, lch);
 933        if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
 934                        (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
 935                int i = 0;
 936                u32 sys_cf;
 937
 938                /* Configure No-Standby */
 939                l = p->dma_read(OCP_SYSCONFIG, lch);
 940                sys_cf = l;
 941                l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
 942                l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
 943                p->dma_write(l , OCP_SYSCONFIG, 0);
 944
 945                l = p->dma_read(CCR, lch);
 946                l &= ~OMAP_DMA_CCR_EN;
 947                p->dma_write(l, CCR, lch);
 948
 949                /* Wait for sDMA FIFO drain */
 950                l = p->dma_read(CCR, lch);
 951                while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
 952                                        OMAP_DMA_CCR_WR_ACTIVE))) {
 953                        udelay(5);
 954                        i++;
 955                        l = p->dma_read(CCR, lch);
 956                }
 957                if (i >= 100)
 958                        printk(KERN_ERR "DMA drain did not complete on "
 959                                        "lch %d\n", lch);
 960                /* Restore OCP_SYSCONFIG */
 961                p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
 962        } else {
 963                l &= ~OMAP_DMA_CCR_EN;
 964                p->dma_write(l, CCR, lch);
 965        }
 966
 967        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 968                int next_lch, cur_lch = lch;
 969                char dma_chan_link_map[dma_lch_count];
 970
 971                memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
 972                do {
 973                        /* The loop case: we've been here already */
 974                        if (dma_chan_link_map[cur_lch])
 975                                break;
 976                        /* Mark the current channel */
 977                        dma_chan_link_map[cur_lch] = 1;
 978
 979                        disable_lnk(cur_lch);
 980
 981                        next_lch = dma_chan[cur_lch].next_lch;
 982                        cur_lch = next_lch;
 983                } while (next_lch != -1);
 984        }
 985
 986        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 987}
 988EXPORT_SYMBOL(omap_stop_dma);
 989
 990/*
 991 * Allows changing the DMA callback function or data. This may be needed if
 992 * the driver shares a single DMA channel for multiple dma triggers.
 993 */
 994int omap_set_dma_callback(int lch,
 995                          void (*callback)(int lch, u16 ch_status, void *data),
 996                          void *data)
 997{
 998        unsigned long flags;
 999
1000        if (lch < 0)
1001                return -ENODEV;
1002
1003        spin_lock_irqsave(&dma_chan_lock, flags);
1004        if (dma_chan[lch].dev_id == -1) {
1005                printk(KERN_ERR "Cannot set DMA callback for a free channel\n");
1006                spin_unlock_irqrestore(&dma_chan_lock, flags);
1007                return -EINVAL;
1008        }
1009        dma_chan[lch].callback = callback;
1010        dma_chan[lch].data = data;
1011        spin_unlock_irqrestore(&dma_chan_lock, flags);
1012
1013        return 0;
1014}
1015EXPORT_SYMBOL(omap_set_dma_callback);
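
    /*
     * Illustrative usage sketch (hypothetical driver code): a driver that
     * multiplexes one channel between RX and TX triggers can swap the
     * callback and context between transfers:
     *
     *      omap_set_dma_callback(lch, my_rx_done, rx_ctx);
     *      ... run the RX transfer, then reprogram for TX ...
     *      omap_set_dma_callback(lch, my_tx_done, tx_ctx);
     *
     * my_rx_done/my_tx_done and the contexts are assumptions for the
     * example; the channel must already be allocated, otherwise -EINVAL
     * is returned.
     */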
1016
1017/*
1018 * Returns the current physical source address for the given DMA channel.
1019 * If the channel is running, the caller must disable interrupts prior to
1020 * calling this function and process the returned value before re-enabling
1021 * interrupts, to prevent races with the interrupt handler. Note that in
1022 * continuous mode there is a chance of CSSA_L register overflow in between
1023 * the two reads, resulting in an incorrect return value.
1024 */
1025dma_addr_t omap_get_dma_src_pos(int lch)
1026{
1027        dma_addr_t offset = 0;
1028
1029        if (cpu_is_omap15xx())
1030                offset = p->dma_read(CPC, lch);
1031        else
1032                offset = p->dma_read(CSAC, lch);
1033
1034        if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1035                offset = p->dma_read(CSAC, lch);
1036
1037        if (cpu_class_is_omap1())
1038                offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1039
1040        return offset;
1041}
1042EXPORT_SYMBOL(omap_get_dma_src_pos);
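
    /*
     * Illustrative usage sketch following the rule in the comment above
     * (hypothetical client code): sample how far a running transfer has
     * progressed, with interrupts disabled around the read and the value
     * consumed before they are re-enabled.  'src_phys' is an assumed
     * physical buffer start address.
     *
     *      unsigned long flags;
     *      size_t bytes_done;
     *
     *      local_irq_save(flags);
     *      bytes_done = omap_get_dma_src_pos(lch) - src_phys;
     *      local_irq_restore(flags);
     */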
1043
1044/*
1045 * Returns the current physical destination address for the given DMA channel.
1046 * If the channel is running, the caller must disable interrupts prior to
1047 * calling this function and process the returned value before re-enabling
1048 * interrupts, to prevent races with the interrupt handler. Note that in
1049 * continuous mode there is a chance of CDSA_L register overflow in between
1050 * the two reads, resulting in an incorrect return value.
1051 */
1052dma_addr_t omap_get_dma_dst_pos(int lch)
1053{
1054        dma_addr_t offset = 0;
1055
1056        if (cpu_is_omap15xx())
1057                offset = p->dma_read(CPC, lch);
1058        else
1059                offset = p->dma_read(CDAC, lch);
1060
1061        /*
1062         * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1063         * read before the DMA controller finished disabling the channel.
1064         */
1065        if (!cpu_is_omap15xx() && offset == 0)
1066                offset = p->dma_read(CDAC, lch);
1067
1068        if (cpu_class_is_omap1())
1069                offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1070
1071        return offset;
1072}
1073EXPORT_SYMBOL(omap_get_dma_dst_pos);
1074
1075int omap_get_dma_active_status(int lch)
1076{
1077        return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1078}
1079EXPORT_SYMBOL(omap_get_dma_active_status);
1080
1081int omap_dma_running(void)
1082{
1083        int lch;
1084
1085        if (cpu_class_is_omap1())
1086                if (omap_lcd_dma_running())
1087                        return 1;
1088
1089        for (lch = 0; lch < dma_chan_count; lch++)
1090                if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1091                        return 1;
1092
1093        return 0;
1094}
1095
1096/*
1097 * The lch_queue DMA will start right after the lch_head one finishes.
1098 * For this DMA link to start, you still need to start the first one
1099 * (see omap_start_dma); that will fire up the entire queue.
1100 */
1101void omap_dma_link_lch(int lch_head, int lch_queue)
1102{
1103        if (omap_dma_in_1510_mode()) {
1104                if (lch_head == lch_queue) {
1105                        p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1106                                                                CCR, lch_head);
1107                        return;
1108                }
1109                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1110                BUG();
1111                return;
1112        }
1113
1114        if ((dma_chan[lch_head].dev_id == -1) ||
1115            (dma_chan[lch_queue].dev_id == -1)) {
1116                printk(KERN_ERR "omap_dma: trying to link "
1117                       "non-requested channels\n");
1118                dump_stack();
1119        }
1120
1121        dma_chan[lch_head].next_lch = lch_queue;
1122}
1123EXPORT_SYMBOL(omap_dma_link_lch);
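
    /*
     * Illustrative usage sketch (hypothetical channels lch_a/lch_b): link
     * two already-requested and configured channels so that lch_b starts
     * when lch_a completes, then kick the pair off by starting the head:
     *
     *      omap_dma_link_lch(lch_a, lch_b);
     *      omap_start_dma(lch_a);
     *      ...
     *      omap_stop_dma(lch_a);
     *      omap_dma_unlink_lch(lch_a, lch_b);
     */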
1124
1125/*
1126 * Once the DMA queue is stopped, we can destroy it.
1127 */
1128void omap_dma_unlink_lch(int lch_head, int lch_queue)
1129{
1130        if (omap_dma_in_1510_mode()) {
1131                if (lch_head == lch_queue) {
1132                        p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1133                                                                CCR, lch_head);
1134                        return;
1135                }
1136                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1137                BUG();
1138                return;
1139        }
1140
1141        if (dma_chan[lch_head].next_lch != lch_queue ||
1142            dma_chan[lch_head].next_lch == -1) {
1143                printk(KERN_ERR "omap_dma: trying to unlink "
1144                       "non-linked channels\n");
1145                dump_stack();
1146        }
1147
1148        if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1149            (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1150                printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1151                       "before unlinking\n");
1152                dump_stack();
1153        }
1154
1155        dma_chan[lch_head].next_lch = -1;
1156}
1157EXPORT_SYMBOL(omap_dma_unlink_lch);
1158
1159#ifndef CONFIG_ARCH_OMAP1
1160/* Create a chain of DMA channels */
1161static void create_dma_lch_chain(int lch_head, int lch_queue)
1162{
1163        u32 l;
1164
1165        /* Check if this is the first link in chain */
1166        if (dma_chan[lch_head].next_linked_ch == -1) {
1167                dma_chan[lch_head].next_linked_ch = lch_queue;
1168                dma_chan[lch_head].prev_linked_ch = lch_queue;
1169                dma_chan[lch_queue].next_linked_ch = lch_head;
1170                dma_chan[lch_queue].prev_linked_ch = lch_head;
1171        }
1172
1173        /* a link exists; link the new channel into the circular chain */
1174        else {
1175                dma_chan[lch_queue].next_linked_ch =
1176                                        dma_chan[lch_head].next_linked_ch;
1177                dma_chan[lch_queue].prev_linked_ch = lch_head;
1178                dma_chan[lch_head].next_linked_ch = lch_queue;
1179                dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1180                                        lch_queue;
1181        }
1182
1183        l = p->dma_read(CLNK_CTRL, lch_head);
1184        l &= ~(0x1f);
1185        l |= lch_queue;
1186        p->dma_write(l, CLNK_CTRL, lch_head);
1187
1188        l = p->dma_read(CLNK_CTRL, lch_queue);
1189        l &= ~(0x1f);
1190        l |= (dma_chan[lch_queue].next_linked_ch);
1191        p->dma_write(l, CLNK_CTRL, lch_queue);
1192}
1193
1194/**
1195 * @brief omap_request_dma_chain : Request a chain of DMA channels
1196 *
1197 * @param dev_id - Device id using the DMA channel
1198 * @param dev_name - Device name
1199 * @param callback - Callback function
1200 * @param chain_id - Returned chain id (the first channel of the chain)
1201 * @param no_of_chans - Number of channels requested
1202 * @param chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1203 *                                                  OMAP_DMA_DYNAMIC_CHAIN
1204 * @param params - Channel parameters
1205 *
1206 * @return - Success : 0
1207 *           Failure: -EINVAL/-ENOMEM
1208 */
1209int omap_request_dma_chain(int dev_id, const char *dev_name,
1210                           void (*callback) (int lch, u16 ch_status,
1211                                             void *data),
1212                           int *chain_id, int no_of_chans, int chain_mode,
1213                           struct omap_dma_channel_params params)
1214{
1215        int *channels;
1216        int i, err;
1217
1218        /* Is the chain mode valid ? */
1219        if (chain_mode != OMAP_DMA_STATIC_CHAIN
1220                        && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1221                printk(KERN_ERR "Invalid chain mode requested\n");
1222                return -EINVAL;
1223        }
1224
1225        if (unlikely((no_of_chans < 1
1226                        || no_of_chans > dma_lch_count))) {
1227                printk(KERN_ERR "Invalid Number of channels requested\n");
1228                return -EINVAL;
1229        }
1230
1231        /*
1232         * Allocate a queue to maintain the status of the channels
1233         * in the chain
1234         */
1235        channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1236        if (channels == NULL) {
1237                printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1238                return -ENOMEM;
1239        }
1240
1241        /* request and reserve DMA channels for the chain */
1242        for (i = 0; i < no_of_chans; i++) {
1243                err = omap_request_dma(dev_id, dev_name,
1244                                        callback, NULL, &channels[i]);
1245                if (err < 0) {
1246                        int j;
1247                        for (j = 0; j < i; j++)
1248                                omap_free_dma(channels[j]);
1249                        kfree(channels);
1250                        printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1251                        return err;
1252                }
1253                dma_chan[channels[i]].prev_linked_ch = -1;
1254                dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1255
1256                /*
1257                 * Allowing client drivers to set common parameters now,
1258                 * so that later only relevant (src_start, dest_start
1259                 * and element count) can be set
1260                 */
1261                omap_set_dma_params(channels[i], &params);
1262        }
1263
1264        *chain_id = channels[0];
1265        dma_linked_lch[*chain_id].linked_dmach_q = channels;
1266        dma_linked_lch[*chain_id].chain_mode = chain_mode;
1267        dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1268        dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1269
1270        for (i = 0; i < no_of_chans; i++)
1271                dma_chan[channels[i]].chain_id = *chain_id;
1272
1273        /* Reset the Queue pointers */
1274        OMAP_DMA_CHAIN_QINIT(*chain_id);
1275
1276        /* Set up the chain */
1277        if (no_of_chans == 1)
1278                create_dma_lch_chain(channels[0], channels[0]);
1279        else {
1280                for (i = 0; i < (no_of_chans - 1); i++)
1281                        create_dma_lch_chain(channels[i], channels[i + 1]);
1282        }
1283
1284        return 0;
1285}
1286EXPORT_SYMBOL(omap_request_dma_chain);
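
    /*
     * Illustrative usage sketch (hypothetical values): request a dynamic
     * chain of two channels sharing one parameter set, queue a couple of
     * transfers on it and start it.  MY_DMA_REQ, my_chain_cb and the
     * physical addresses are assumptions made only for the example.
     *
     *      struct omap_dma_channel_params params = {
     *              .data_type        = OMAP_DMA_DATA_TYPE_S32,
     *              .elem_count       = 256,
     *              .frame_count      = 1,
     *              .src_amode        = OMAP_DMA_AMODE_POST_INC,
     *              .dst_amode        = OMAP_DMA_AMODE_POST_INC,
     *              .sync_mode        = OMAP_DMA_SYNC_ELEMENT,
     *              .trigger          = MY_DMA_REQ,
     *              .src_or_dst_synch = OMAP_DMA_DST_SYNC,
     *      };
     *      int chain_id;
     *
     *      if (omap_request_dma_chain(MY_DMA_REQ, "my-dev", my_chain_cb,
     *                                 &chain_id, 2, OMAP_DMA_DYNAMIC_CHAIN,
     *                                 params) == 0) {
     *              omap_dma_chain_a_transfer(chain_id, src1, dst1, 256, 1, NULL);
     *              omap_dma_chain_a_transfer(chain_id, src2, dst2, 256, 1, NULL);
     *              omap_start_dma_chain_transfers(chain_id);
     *      }
     */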
1287
1288/**
1289 * @brief omap_modify_dma_chain_params : Modify the chain's params after
1290 * setting them. Don't do this while the DMA is running!
1291 *
1292 * @param chain_id - Chained logical channel id.
1293 * @param params - Channel parameters
1294 *
1295 * @return - Success : 0
1296 *           Failure : -EINVAL
1297 */
1298int omap_modify_dma_chain_params(int chain_id,
1299                                struct omap_dma_channel_params params)
1300{
1301        int *channels;
1302        u32 i;
1303
1304        /* Check for input params */
1305        if (unlikely((chain_id < 0
1306                        || chain_id >= dma_lch_count))) {
1307                printk(KERN_ERR "Invalid chain id\n");
1308                return -EINVAL;
1309        }
1310
1311        /* Check if the chain exists */
1312        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1313                printk(KERN_ERR "Chain doesn't exist\n");
1314                return -EINVAL;
1315        }
1316        channels = dma_linked_lch[chain_id].linked_dmach_q;
1317
1318        for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1319                /*
1320                 * Allowing client drivers to set common parameters now,
1321                 * so that later only relevant (src_start, dest_start
1322                 * and element count) can be set
1323                 */
1324                omap_set_dma_params(channels[i], &params);
1325        }
1326
1327        return 0;
1328}
1329EXPORT_SYMBOL(omap_modify_dma_chain_params);
1330
1331/**
1332 * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1333 *
1334 * @param chain_id
1335 *
1336 * @return - Success : 0
1337 *           Failure : -EINVAL
1338 */
1339int omap_free_dma_chain(int chain_id)
1340{
1341        int *channels;
1342        u32 i;
1343
1344        /* Check for input params */
1345        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1346                printk(KERN_ERR "Invalid chain id\n");
1347                return -EINVAL;
1348        }
1349
1350        /* Check if the chain exists */
1351        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1352                printk(KERN_ERR "Chain doesn't exist\n");
1353                return -EINVAL;
1354        }
1355
1356        channels = dma_linked_lch[chain_id].linked_dmach_q;
1357        for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1358                dma_chan[channels[i]].next_linked_ch = -1;
1359                dma_chan[channels[i]].prev_linked_ch = -1;
1360                dma_chan[channels[i]].chain_id = -1;
1361                dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1362                omap_free_dma(channels[i]);
1363        }
1364
1365        kfree(channels);
1366
1367        dma_linked_lch[chain_id].linked_dmach_q = NULL;
1368        dma_linked_lch[chain_id].chain_mode = -1;
1369        dma_linked_lch[chain_id].chain_state = -1;
1370
1371        return 0;
1372}
1373EXPORT_SYMBOL(omap_free_dma_chain);
1374
1375/**
1376 * @brief omap_dma_chain_status - Check if the chain is in
1377 * active / inactive state.
1378 * @param chain_id
1379 *
1380 * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1381 *           Failure : -EINVAL
1382 */
1383int omap_dma_chain_status(int chain_id)
1384{
1385        /* Check for input params */
1386        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1387                printk(KERN_ERR "Invalid chain id\n");
1388                return -EINVAL;
1389        }
1390
1391        /* Check if the chain exists */
1392        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1393                printk(KERN_ERR "Chain doesn't exist\n");
1394                return -EINVAL;
1395        }
1396        pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1397                        dma_linked_lch[chain_id].q_count);
1398
1399        if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1400                return OMAP_DMA_CHAIN_INACTIVE;
1401
1402        return OMAP_DMA_CHAIN_ACTIVE;
1403}
1404EXPORT_SYMBOL(omap_dma_chain_status);
1405
1406/**
1407 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1408 * set the params and start the transfer.
1409 *
1410 * @param chain_id
1411 * @param src_start - buffer start address
1412 * @param dest_start - Dest address
1413 * @param elem_count
1414 * @param frame_count
1415 * @param callbk_data - channel callback parameter data.
1416 *
1417 * @return  - Success : 0
1418 *            Failure: -EINVAL/-EBUSY
1419 */
1420int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1421                        int elem_count, int frame_count, void *callbk_data)
1422{
1423        int *channels;
1424        u32 l, lch;
1425        int start_dma = 0;
1426
1427        /*
1428         * If the buffer size is less than 1, there is
1429         * no use in starting the chain
1430         */
1431        if (elem_count < 1) {
1432                printk(KERN_ERR "Invalid buffer size\n");
1433                return -EINVAL;
1434        }
1435
1436        /* Check for input params */
1437        if (unlikely((chain_id < 0
1438                        || chain_id >= dma_lch_count))) {
1439                printk(KERN_ERR "Invalid chain id\n");
1440                return -EINVAL;
1441        }
1442
1443        /* Check if the chain exists */
1444        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1445                printk(KERN_ERR "Chain doesn't exist\n");
1446                return -EINVAL;
1447        }
1448
1449        /* Check if all the channels in chain are in use */
1450        if (OMAP_DMA_CHAIN_QFULL(chain_id))
1451                return -EBUSY;
1452
1453        /* Frame count may be negative in case of indexed transfers */
1454        channels = dma_linked_lch[chain_id].linked_dmach_q;
1455
1456        /* Get a free channel */
1457        lch = channels[dma_linked_lch[chain_id].q_tail];
1458
1459        /* Store the callback data */
1460        dma_chan[lch].data = callbk_data;
1461
1462        /* Increment the q_tail */
1463        OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1464
1465        /* Set the params to the free channel */
1466        if (src_start != 0)
1467                p->dma_write(src_start, CSSA, lch);
1468        if (dest_start != 0)
1469                p->dma_write(dest_start, CDSA, lch);
1470
1471        /* Write the buffer size */
1472        p->dma_write(elem_count, CEN, lch);
1473        p->dma_write(frame_count, CFN, lch);
1474
1475        /*
1476         * If the chain is dynamically linked,
1477         * then we may have to start the chain if its not active
1478         */
1479        if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1480
1481                /*
1482                 * In Dynamic chain, if the chain is not started,
1483                 * queue the channel
1484                 */
1485                if (dma_linked_lch[chain_id].chain_state ==
1486                                                DMA_CHAIN_NOTSTARTED) {
1487                        /* Enable the link in previous channel */
1488                        if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1489                                                                DMA_CH_QUEUED)
1490                                enable_lnk(dma_chan[lch].prev_linked_ch);
1491                        dma_chan[lch].state = DMA_CH_QUEUED;
1492                }
1493
1494                /*
1495                 * Chain is already started, make sure it's active,
1496                 * if not then start the chain
1497                 */
1498                else {
1499                        start_dma = 1;
1500
1501                        if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1502                                                        DMA_CH_STARTED) {
1503                                enable_lnk(dma_chan[lch].prev_linked_ch);
1504                                dma_chan[lch].state = DMA_CH_QUEUED;
1505                                start_dma = 0;
1506                                if (0 == ((1 << 7) & p->dma_read(
1507                                        CCR, dma_chan[lch].prev_linked_ch))) {
1508                                        disable_lnk(dma_chan[lch].
1509                                                    prev_linked_ch);
1510                                        pr_debug("\n prev ch is stopped\n");
1511                                        start_dma = 1;
1512                                }
1513                        }
1514
1515                        else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1516                                                        == DMA_CH_QUEUED) {
1517                                enable_lnk(dma_chan[lch].prev_linked_ch);
1518                                dma_chan[lch].state = DMA_CH_QUEUED;
1519                                start_dma = 0;
1520                        }
1521                        omap_enable_channel_irq(lch);
1522
1523                        l = p->dma_read(CCR, lch);
1524
1525                        if ((0 == (l & (1 << 24))))
1526                                l &= ~(1 << 25);
1527                        else
1528                                l |= (1 << 25);
1529                        if (start_dma == 1) {
1530                                if (0 == (l & (1 << 7))) {
1531                                        l |= (1 << 7);
1532                                        dma_chan[lch].state = DMA_CH_STARTED;
1533                                        pr_debug("starting %d\n", lch);
1534                                        p->dma_write(l, CCR, lch);
1535                                } else
1536                                        start_dma = 0;
1537                        } else {
1538                                if (0 == (l & (1 << 7)))
1539                                        p->dma_write(l, CCR, lch);
1540                        }
1541                        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1542                }
1543        }
1544
1545        return 0;
1546}
1547EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1548
1549/**
1550 * @brief omap_start_dma_chain_transfers - Start the chain
1551 *
1552 * @param chain_id
1553 *
1554 * @return - Success : 0
1555 *           Failure : -EINVAL/-EBUSY
1556 */
1557int omap_start_dma_chain_transfers(int chain_id)
1558{
1559        int *channels;
1560        u32 l, i;
1561
1562        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1563                printk(KERN_ERR "Invalid chain id\n");
1564                return -EINVAL;
1565        }
1566
1567        channels = dma_linked_lch[chain_id].linked_dmach_q;
1568
1569        if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1570                printk(KERN_ERR "Chain is already started\n");
1571                return -EBUSY;
1572        }
1573
1574        if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1575                for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1576                                                                        i++) {
1577                        enable_lnk(channels[i]);
1578                        omap_enable_channel_irq(channels[i]);
1579                }
1580        } else {
1581                omap_enable_channel_irq(channels[0]);
1582        }
1583
1584        l = p->dma_read(CCR, channels[0]);
1585        l |= (1 << 7);
1586        dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1587        dma_chan[channels[0]].state = DMA_CH_STARTED;
1588
1589        if (!(l & (1 << 24)))
1590                l &= ~(1 << 25);
1591        else
1592                l |= (1 << 25);
1593        p->dma_write(l, CCR, channels[0]);
1594
1595        dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1596
1597        return 0;
1598}
1599EXPORT_SYMBOL(omap_start_dma_chain_transfers);
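/*
 * Editor's note - a minimal usage sketch, not part of the original driver:
 * queue one transfer on an already created chain and then start it.  It
 * assumes the chain was set up earlier with omap_request_dma_chain() and
 * that the prototypes used below come from <plat/dma.h>; the example_ name
 * and all values a caller would pass are purely illustrative.
 */
static int example_kick_chain(int chain_id, int src_start, int dest_start,
			      int elem_count, int frame_count)
{
	int ret;

	/* Queue a single transfer; no per-transfer callback data is used. */
	ret = omap_dma_chain_a_transfer(chain_id, src_start, dest_start,
					elem_count, frame_count, NULL);
	if (ret)
		return ret;

	/* Enable the first queued channel so the chain starts running. */
	return omap_start_dma_chain_transfers(chain_id);
}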
1600
1601/**
1602 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1603 *
1604 * @param chain_id
1605 *
1606 * @return - Success : 0
1607 *           Failure : EINVAL
1608 */
1609int omap_stop_dma_chain_transfers(int chain_id)
1610{
1611        int *channels;
1612        u32 l, i;
1613        u32 sys_cf = 0;
1614
1615        /* Check for input params */
1616        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1617                printk(KERN_ERR "Invalid chain id\n");
1618                return -EINVAL;
1619        }
1620
1621        /* Check if the chain exists */
1622        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1623                printk(KERN_ERR "Chain doesn't exist\n");
1624                return -EINVAL;
1625        }
1626        channels = dma_linked_lch[chain_id].linked_dmach_q;
1627
1628        if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1629                sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1630                l = sys_cf;
1631                /* Middle mode reg set no Standby */
1632                l &= ~((1 << 12)|(1 << 13));
1633                p->dma_write(l, OCP_SYSCONFIG, 0);
1634        }
1635
1636        for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1637
1638                /* Stop the Channel transmission */
1639                l = p->dma_read(CCR, channels[i]);
1640                l &= ~(1 << 7);
1641                p->dma_write(l, CCR, channels[i]);
1642
1643                /* Disable the link in all the channels */
1644                disable_lnk(channels[i]);
1645                dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1646
1647        }
1648        dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1649
1650        /* Reset the Queue pointers */
1651        OMAP_DMA_CHAIN_QINIT(chain_id);
1652
1653        if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1654                p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1655
1656        return 0;
1657}
1658EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
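/*
 * Editor's note - a teardown sketch, not part of the original driver:
 * stop a chain before releasing it.  omap_free_dma_chain() is assumed to
 * be the matching release helper declared in <plat/dma.h>; the example_
 * name is illustrative.
 */
static void example_teardown_chain(int chain_id)
{
	/* Clears the channel enables and unlinks every channel in the chain. */
	if (omap_stop_dma_chain_transfers(chain_id))
		pr_err("DMA: could not stop chain %d\n", chain_id);

	/* Assumed helper that returns the logical channels to the pool. */
	omap_free_dma_chain(chain_id);
}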
1659
1660/* Get the index of the ongoing DMA in chain */
1661/**
1662 * @brief omap_get_dma_chain_index - Get the element and frame index
1663 * of the ongoing DMA in chain
1664 *
1665 * @param chain_id
1666 * @param ei - Element index
1667 * @param fi - Frame index
1668 *
1669 * @return - Success : 0
1670 *           Failure : -EINVAL
1671 */
1672int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1673{
1674        int lch;
1675        int *channels;
1676
1677        /* Check for input params */
1678        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1679                printk(KERN_ERR "Invalid chain id\n");
1680                return -EINVAL;
1681        }
1682
1683        /* Check if the chain exists */
1684        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1685                printk(KERN_ERR "Chain doesn't exist\n");
1686                return -EINVAL;
1687        }
1688        if ((!ei) || (!fi))
1689                return -EINVAL;
1690
1691        channels = dma_linked_lch[chain_id].linked_dmach_q;
1692
1693        /* Get the current channel */
1694        lch = channels[dma_linked_lch[chain_id].q_head];
1695
1696        *ei = p->dma_read(CCEN, lch);
1697        *fi = p->dma_read(CCFN, lch);
1698
1699        return 0;
1700}
1701EXPORT_SYMBOL(omap_get_dma_chain_index);
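/*
 * Editor's note - an illustrative sketch, not part of the original driver:
 * poll the element/frame progress of the channel at the head of a chain,
 * e.g. from a timeout or debug path.  The example_ name is illustrative.
 */
static void example_report_chain_progress(int chain_id)
{
	int ei, fi;

	if (omap_get_dma_chain_index(chain_id, &ei, &fi) == 0)
		pr_debug("DMA chain %d: element %d, frame %d\n",
			 chain_id, ei, fi);
}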
1702
1703/**
1704 * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1705 * ongoing DMA in chain
1706 *
1707 * @param chain_id
1708 *
1709 * @return - Success : Destination position
1710 *           Failure : -EINVAL
1711 */
1712int omap_get_dma_chain_dst_pos(int chain_id)
1713{
1714        int lch;
1715        int *channels;
1716
1717        /* Check for input params */
1718        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1719                printk(KERN_ERR "Invalid chain id\n");
1720                return -EINVAL;
1721        }
1722
1723        /* Check if the chain exists */
1724        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1725                printk(KERN_ERR "Chain doesn't exist\n");
1726                return -EINVAL;
1727        }
1728
1729        channels = dma_linked_lch[chain_id].linked_dmach_q;
1730
1731        /* Get the current channel */
1732        lch = channels[dma_linked_lch[chain_id].q_head];
1733
1734        return p->dma_read(CDAC, lch);
1735}
1736EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1737
1738/**
1739 * @brief omap_get_dma_chain_src_pos - Get the source position
1740 * of the ongoing DMA in chain
1741 * @param chain_id
1742 *
1743 * @return - Success : Source position
1744 *           Failure : -EINVAL
1745 */
1746int omap_get_dma_chain_src_pos(int chain_id)
1747{
1748        int lch;
1749        int *channels;
1750
1751        /* Check for input params */
1752        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1753                printk(KERN_ERR "Invalid chain id\n");
1754                return -EINVAL;
1755        }
1756
1757        /* Check if the chain exists */
1758        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1759                printk(KERN_ERR "Chain doesn't exist\n");
1760                return -EINVAL;
1761        }
1762
1763        channels = dma_linked_lch[chain_id].linked_dmach_q;
1764
1765        /* Get the current channel */
1766        lch = channels[dma_linked_lch[chain_id].q_head];
1767
1768        return p->dma_read(CSAC, lch);
1769}
1770EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
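/*
 * Editor's note - an illustrative sketch, not part of the original driver,
 * combining the two position getters above: report how far the channel at
 * the head of a chain has advanced through its source and destination
 * buffers.  The example_ name is illustrative.
 */
static void example_report_chain_positions(int chain_id)
{
	int src = omap_get_dma_chain_src_pos(chain_id);
	int dst = omap_get_dma_chain_dst_pos(chain_id);

	/* Both getters return -EINVAL for a bad or unallocated chain id. */
	if (src == -EINVAL || dst == -EINVAL)
		return;

	pr_debug("DMA chain %d: CSAC 0x%08x, CDAC 0x%08x\n",
		 chain_id, src, dst);
}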
1771#endif  /* ifndef CONFIG_ARCH_OMAP1 */
1772
1773/*----------------------------------------------------------------------------*/
1774
1775#ifdef CONFIG_ARCH_OMAP1
1776
1777static int omap1_dma_handle_ch(int ch)
1778{
1779        u32 csr;
1780
1781        if (enable_1510_mode && ch >= 6) {
1782                csr = dma_chan[ch].saved_csr;
1783                dma_chan[ch].saved_csr = 0;
1784        } else
1785                csr = p->dma_read(CSR, ch);
1786        if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1787                dma_chan[ch + 6].saved_csr = csr >> 7;
1788                csr &= 0x7f;
1789        }
1790        if ((csr & 0x3f) == 0)
1791                return 0;
1792        if (unlikely(dma_chan[ch].dev_id == -1)) {
1793                printk(KERN_WARNING "Spurious interrupt from DMA channel "
1794                       "%d (CSR %04x)\n", ch, csr);
1795                return 0;
1796        }
1797        if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1798                printk(KERN_WARNING "DMA timeout with device %d\n",
1799                       dma_chan[ch].dev_id);
1800        if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1801                printk(KERN_WARNING "DMA synchronization event drop occurred "
1802                       "with device %d\n", dma_chan[ch].dev_id);
1803        if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1804                dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1805        if (likely(dma_chan[ch].callback != NULL))
1806                dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1807
1808        return 1;
1809}
1810
1811static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1812{
1813        int ch = ((int) dev_id) - 1;
1814        int handled = 0;
1815
1816        for (;;) {
1817                int handled_now = 0;
1818
1819                handled_now += omap1_dma_handle_ch(ch);
1820                if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1821                        handled_now += omap1_dma_handle_ch(ch + 6);
1822                if (!handled_now)
1823                        break;
1824                handled += handled_now;
1825        }
1826
1827        return handled ? IRQ_HANDLED : IRQ_NONE;
1828}
1829
1830#else
1831#define omap1_dma_irq_handler   NULL
1832#endif
1833
1834#ifdef CONFIG_ARCH_OMAP2PLUS
1835
1836static int omap2_dma_handle_ch(int ch)
1837{
1838        u32 status = p->dma_read(CSR, ch);
1839
1840        if (!status) {
1841                if (printk_ratelimit())
1842                        printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1843                                ch);
1844                p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1845                return 0;
1846        }
1847        if (unlikely(dma_chan[ch].dev_id == -1)) {
1848                if (printk_ratelimit())
1849                        printk(KERN_WARNING "IRQ %04x for non-allocated DMA "
1850                                        "channel %d\n", status, ch);
1851                return 0;
1852        }
1853        if (unlikely(status & OMAP_DMA_DROP_IRQ))
1854                printk(KERN_INFO
1855                       "DMA synchronization event drop occurred with device "
1856                       "%d\n", dma_chan[ch].dev_id);
1857        if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1858                printk(KERN_INFO "DMA transaction error with device %d\n",
1859                       dma_chan[ch].dev_id);
1860                if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1861                        u32 ccr;
1862
1863                        ccr = p->dma_read(CCR, ch);
1864                        ccr &= ~OMAP_DMA_CCR_EN;
1865                        p->dma_write(ccr, CCR, ch);
1866                        dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1867                }
1868        }
1869        if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1870                printk(KERN_INFO "DMA secure error with device %d\n",
1871                       dma_chan[ch].dev_id);
1872        if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1873                printk(KERN_INFO "DMA misaligned error with device %d\n",
1874                       dma_chan[ch].dev_id);
1875
1876        p->dma_write(status, CSR, ch);
1877        p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1878        /* read back the register to flush the write */
1879        p->dma_read(IRQSTATUS_L0, ch);
1880
1881        /* If the ch is not chained then chain_id will be -1 */
1882        if (dma_chan[ch].chain_id != -1) {
1883                int chain_id = dma_chan[ch].chain_id;
1884                dma_chan[ch].state = DMA_CH_NOTSTARTED;
1885                if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1886                        dma_chan[dma_chan[ch].next_linked_ch].state =
1887                                                        DMA_CH_STARTED;
1888                if (dma_linked_lch[chain_id].chain_mode ==
1889                                                OMAP_DMA_DYNAMIC_CHAIN)
1890                        disable_lnk(ch);
1891
1892                if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1893                        OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1894
1895                status = p->dma_read(CSR, ch);
1896                p->dma_write(status, CSR, ch);
1897        }
1898
1899        if (likely(dma_chan[ch].callback != NULL))
1900                dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1901
1902        return 0;
1903}
1904
1905/* STATUS register count is from 1-32 while ours is 0-31 */
1906static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1907{
1908        u32 val, enable_reg;
1909        int i;
1910
1911        val = p->dma_read(IRQSTATUS_L0, 0);
1912        if (val == 0) {
1913                if (printk_ratelimit())
1914                        printk(KERN_WARNING "Spurious DMA IRQ\n");
1915                return IRQ_HANDLED;
1916        }
1917        enable_reg = p->dma_read(IRQENABLE_L0, 0);
1918        val &= enable_reg; /* Dispatch only relevant interrupts */
1919        for (i = 0; i < dma_lch_count && val != 0; i++) {
1920                if (val & 1)
1921                        omap2_dma_handle_ch(i);
1922                val >>= 1;
1923        }
1924
1925        return IRQ_HANDLED;
1926}
1927
1928static struct irqaction omap24xx_dma_irq = {
1929        .name = "DMA",
1930        .handler = omap2_dma_irq_handler,
1931        .flags = IRQF_DISABLED
1932};
1933
1934#else
1935static struct irqaction omap24xx_dma_irq;
1936#endif
1937
1938/*----------------------------------------------------------------------------*/
1939
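/*
 * Save and restore of the global (non per-channel) DMA registers
 * (IRQENABLE_L0, OCP_SYSCONFIG, GCR), intended to be called around power
 * transitions in which the DMA module may lose context.
 */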
1940void omap_dma_global_context_save(void)
1941{
1942        omap_dma_global_context.dma_irqenable_l0 =
1943                p->dma_read(IRQENABLE_L0, 0);
1944        omap_dma_global_context.dma_ocp_sysconfig =
1945                p->dma_read(OCP_SYSCONFIG, 0);
1946        omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
1947}
1948
1949void omap_dma_global_context_restore(void)
1950{
1951        int ch;
1952
1953        p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
1954        p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
1955                OCP_SYSCONFIG, 0);
1956        p->dma_write(omap_dma_global_context.dma_irqenable_l0,
1957                IRQENABLE_L0, 0);
1958
1959        if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
1960                p->dma_write(0x3, IRQSTATUS_L0, 0);
1961
1962        for (ch = 0; ch < dma_chan_count; ch++)
1963                if (dma_chan[ch].dev_id != -1)
1964                        omap_clear_dma(ch);
1965}
1966
1967static int __devinit omap_system_dma_probe(struct platform_device *pdev)
1968{
1969        int ch, ret = 0;
1970        int dma_irq;
1971        char irq_name[4];
1972        int irq_rel;
1973
1974        p = pdev->dev.platform_data;
1975        if (!p) {
1976                dev_err(&pdev->dev, "%s: System DMA initialized without "
1977                        "platform data\n", __func__);
1978                return -EINVAL;
1979        }
1980
1981        d                       = p->dma_attr;
1982        errata                  = p->errata;
1983
1984        if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
1985                        && (omap_dma_reserve_channels <= dma_lch_count))
1986                d->lch_count    = omap_dma_reserve_channels;
1987
1988        dma_lch_count           = d->lch_count;
1989        dma_chan_count          = dma_lch_count;
1990        dma_chan                = d->chan;
1991        enable_1510_mode        = d->dev_caps & ENABLE_1510_MODE;
1992
1993        if (cpu_class_is_omap2()) {
1994                dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
1995                                                dma_lch_count, GFP_KERNEL);
1996                if (!dma_linked_lch) {
1997                        ret = -ENOMEM;
1998                        goto exit_dma_lch_fail;
1999                }
2000        }
2001
2002        spin_lock_init(&dma_chan_lock);
2003        for (ch = 0; ch < dma_chan_count; ch++) {
2004                omap_clear_dma(ch);
2005                if (cpu_class_is_omap2())
2006                        omap2_disable_irq_lch(ch);
2007
2008                dma_chan[ch].dev_id = -1;
2009                dma_chan[ch].next_lch = -1;
2010
2011                if (ch >= 6 && enable_1510_mode)
2012                        continue;
2013
2014                if (cpu_class_is_omap1()) {
2015                        /*
2016                         * request_irq() doesn't like dev_id (i.e. ch) being
2017                         * zero, so we have to kludge around this.
2018                         */
2019                        sprintf(&irq_name[0], "%d", ch);
2020                        dma_irq = platform_get_irq_byname(pdev, irq_name);
2021
2022                        if (dma_irq < 0) {
2023                                ret = dma_irq;
2024                                goto exit_dma_irq_fail;
2025                        }
2026
2027                        /* INT_DMA_LCD is handled in lcd_dma.c */
2028                        if (dma_irq == INT_DMA_LCD)
2029                                continue;
2030
2031                        ret = request_irq(dma_irq,
2032                                        omap1_dma_irq_handler, 0, "DMA",
2033                                        (void *) (ch + 1));
2034                        if (ret != 0)
2035                                goto exit_dma_irq_fail;
2036                }
2037        }
2038
2039        if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
2040                omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2041                                DMA_DEFAULT_FIFO_DEPTH, 0);
2042
2043        if (cpu_class_is_omap2()) {
2044                strcpy(irq_name, "0");
2045                dma_irq = platform_get_irq_byname(pdev, irq_name);
2046                if (dma_irq < 0) {
2047                        dev_err(&pdev->dev, "failed: request IRQ %d\n", dma_irq);
                            ret = dma_irq;
2048                        goto exit_dma_lch_fail;
2049                }
2050                ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2051                if (ret) {
2052                        dev_err(&pdev->dev, "setup of IRQ %d failed "
2053                                "for DMA (error %d)\n", dma_irq, ret);
2054                        goto exit_dma_lch_fail;
2055                }
2056        }
2057
2058        /* reserve dma channels 0 and 1 in high security devices */
2059        if (cpu_is_omap34xx() &&
2060                (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2061                printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2062                                "HS ROM code\n");
2063                dma_chan[0].dev_id = 0;
2064                dma_chan[1].dev_id = 1;
2065        }
2066        p->show_dma_caps();
2067        return 0;
2068
2069exit_dma_irq_fail:
2070        dev_err(&pdev->dev, "unable to request IRQ %d "
2071                        "for DMA (error %d)\n", dma_irq, ret);
2072        for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2073                dma_irq = platform_get_irq(pdev, irq_rel);
2074                free_irq(dma_irq, (void *)(irq_rel + 1));
2075        }
2076
2077exit_dma_lch_fail:
2078        kfree(p);
2079        kfree(d);
2080        kfree(dma_chan);
2081        return ret;
2082}
2083
2084static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2085{
2086        int dma_irq;
2087
2088        if (cpu_class_is_omap2()) {
2089                char irq_name[4];
2090                strcpy(irq_name, "0");
2091                dma_irq = platform_get_irq_byname(pdev, irq_name);
2092                remove_irq(dma_irq, &omap24xx_dma_irq);
2093        } else {
2094                int irq_rel = 0;
2095                for ( ; irq_rel < dma_chan_count; irq_rel++) {
2096                        dma_irq = platform_get_irq(pdev, irq_rel);
2097                        free_irq(dma_irq, (void *)(irq_rel + 1));
2098                }
2099        }
2100        kfree(p);
2101        kfree(d);
2102        kfree(dma_chan);
2103        return 0;
2104}
2105
2106static struct platform_driver omap_system_dma_driver = {
2107        .probe          = omap_system_dma_probe,
2108        .remove         = omap_system_dma_remove,
2109        .driver         = {
2110                .name   = "omap_dma_system"
2111        },
2112};
2113
2114static int __init omap_system_dma_init(void)
2115{
2116        return platform_driver_register(&omap_system_dma_driver);
2117}
2118arch_initcall(omap_system_dma_init);
2119
2120static void __exit omap_system_dma_exit(void)
2121{
2122        platform_driver_unregister(&omap_system_dma_driver);
2123}
2124
2125MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2126MODULE_LICENSE("GPL");
2127MODULE_ALIAS("platform:" DRIVER_NAME);
2128MODULE_AUTHOR("Texas Instruments Inc");
2129
2130/*
2131 * Reserve the omap SDMA channels using cmdline bootarg
2132 * "omap_dma_reserve_ch=". The valid range is 1 to 32
2133 */
2134static int __init omap_dma_cmdline_reserve_ch(char *str)
2135{
2136        if (get_option(&str, &omap_dma_reserve_channels) != 1)
2137                omap_dma_reserve_channels = 0;
2138        return 1;
2139}
2140
2141__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
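/*
 * Editor's note - an illustrative command line, not part of the driver:
 * on devices whose dma_attr sets RESERVE_CHANNEL, appending
 * "omap_dma_reserve_ch=16" to the kernel bootargs limits the driver to 16
 * logical channels, e.g. (the other arguments are made up):
 *
 *     console=ttyO2,115200n8 root=/dev/mmcblk0p2 omap_dma_reserve_ch=16
 */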
2142
2143
2144