linux/arch/arm/plat-omap/dma.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/arch/arm/plat-omap/dma.c
   4 *
   5 * Copyright (C) 2003 - 2008 Nokia Corporation
   6 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
   7 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
   9 * by Imre Deak <imre.deak@nokia.com>
  10 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
  11 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
  12 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
  13 *
  14 * Copyright (C) 2009 Texas Instruments
  15 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  16 *
  17 * Support functions for the OMAP internal DMA channels.
  18 *
  19 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  20 * Converted DMA library into DMA platform driver.
  21 *      - G, Manjunath Kondaiah <manjugk@ti.com>
  22 */
  23
  24#include <linux/module.h>
  25#include <linux/init.h>
  26#include <linux/sched.h>
  27#include <linux/spinlock.h>
  28#include <linux/errno.h>
  29#include <linux/interrupt.h>
  30#include <linux/irq.h>
  31#include <linux/io.h>
  32#include <linux/slab.h>
  33#include <linux/delay.h>
  34
  35#include <linux/omap-dma.h>
  36
  37#ifdef CONFIG_ARCH_OMAP1
  38#include <mach/soc.h>
  39#endif
  40
  41/*
  42 * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
  43 * channels that an instance of the SDMA IP block can support.  Used
  44 * to size arrays.  (The actual maximum on a particular SoC may be less
  45 * than this -- for example, OMAP1 SDMA instances only support 17 logical
  46 * DMA channels.)
  47 */
  48#define MAX_LOGICAL_DMA_CH_COUNT                32
  49
  50#undef DEBUG
  51
  52#ifndef CONFIG_ARCH_OMAP1
  53enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
  54        DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
  55};
  56
  57enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
  58#endif
  59
  60#define OMAP_DMA_ACTIVE                 0x01
  61#define OMAP2_DMA_CSR_CLEAR_MASK        0xffffffff
  62
  63#define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
  64
  65static struct omap_system_dma_plat_info *p;
  66static struct omap_dma_dev_attr *d;
  67static void omap_clear_dma(int lch);
  68static int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
  69                                 unsigned char write_prio);
  70static int enable_1510_mode;
  71static u32 errata;
  72
  73static struct omap_dma_global_context_registers {
  74        u32 dma_irqenable_l0;
  75        u32 dma_irqenable_l1;
  76        u32 dma_ocp_sysconfig;
  77        u32 dma_gcr;
  78} omap_dma_global_context;
  79
/*
 * Per-chain bookkeeping for linked logical channels (OMAP2+ chaining API).
 * The linked channels form a circular queue; q_head/q_tail/q_count
 * implement that queue over linked_dmach_q.
 */
struct dma_link_info {
	int *linked_dmach_q;	/* logical channel numbers in this chain */
	int no_of_lchs_linked;	/* number of entries in linked_dmach_q */

	int q_count;		/* transfers currently queued on the chain */
	int q_tail;		/* index where the next transfer is queued */
	int q_head;		/* index of the next transfer to start */

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* chain mode constant from omap-dma.h --
				 * NOTE(review): values not visible here */

};

/* One entry per chain id; NOTE(review): allocated outside this chunk */
static struct dma_link_info *dma_linked_lch;
  94
  95#ifndef CONFIG_ARCH_OMAP1
  96
  97/* Chain handling macros */
  98#define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
  99        do {                                                            \
 100                dma_linked_lch[chain_id].q_head =                       \
 101                dma_linked_lch[chain_id].q_tail =                       \
 102                dma_linked_lch[chain_id].q_count = 0;                   \
 103        } while (0)
 104#define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
 105                (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
 106                dma_linked_lch[chain_id].q_count)
 107#define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
 108        do {                                                            \
 109                ((dma_linked_lch[chain_id].no_of_lchs_linked-1) ==      \
 110                dma_linked_lch[chain_id].q_count)                       \
 111        } while (0)
 112#define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
 113                (0 == dma_linked_lch[chain_id].q_count)
 114#define __OMAP_DMA_CHAIN_INCQ(end)                                      \
 115        ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
 116#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
 117        do {                                                            \
 118                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
 119                dma_linked_lch[chain_id].q_count--;                     \
 120        } while (0)
 121
 122#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
 123        do {                                                            \
 124                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
 125                dma_linked_lch[chain_id].q_count++; \
 126        } while (0)
 127#endif
 128
static int dma_lch_count;		/* logical channels on this SDMA */
static int dma_chan_count;		/* channels available for allocation */
static int omap_dma_reserve_channels;	/* NOTE(review): consumed outside
					 * this chunk -- likely at probe */

/* Protects dma_chan[] allocation state and IRQENABLE_L0 read-modify-write */
static spinlock_t dma_chan_lock;
/* Per-logical-channel driver state array */
static struct omap_dma_lch *dma_chan;

static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

/*
 * NOTE(review): the expansion ends in ';', so "REVISIT_24XX();" yields an
 * extra empty statement -- harmless in statement context, but fragile.
 */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__);
 142
 143#ifdef CONFIG_ARCH_OMAP15XX
 144/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	/* enable_1510_mode is set once during driver initialization */
	return enable_1510_mode;
}
 149#else
 150#define omap_dma_in_1510_mode()         0
 151#endif
 152
 153#ifdef CONFIG_ARCH_OMAP1
/*
 * Route global DMA request line @req to device @dev through the OMAP1
 * functional multiplexing registers.  Each 32-bit mux register packs
 * five 6-bit device-select fields, hence the /5 and %5 arithmetic.
 */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);		/* clear this request's field */
	l |= (dev - 1) << shift;	/* device ids are 1-based in the API */
	omap_writel(l, reg);
}
 165#else
 166#define set_gdma_dev(req, dev)  do {} while (0)
 167#define omap_readl(reg)         0
 168#define omap_writel(val, reg)   do {} while (0)
 169#endif
 170
 171#ifdef CONFIG_ARCH_OMAP1
/*
 * omap_set_dma_priority - set TC port priority (OMAP1 flavour)
 * @lch:	unused here; OMAP1 priority is per destination port
 * @dst_port:	OMAP_DMA_PORT_* destination port to adjust
 * @priority:	4-bit priority value for the port's PRIOR register
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (dma_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);		/* priority field: bits 11:8 */
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}
}
 201#endif
 202
 203#ifdef CONFIG_ARCH_OMAP2PLUS
 204void omap_set_dma_priority(int lch, int dst_port, int priority)
 205{
 206        u32 ccr;
 207
 208        ccr = p->dma_read(CCR, lch);
 209        if (priority)
 210                ccr |= (1 << 6);
 211        else
 212                ccr &= ~(1 << 6);
 213        p->dma_write(ccr, CCR, lch);
 214}
 215#endif
 216EXPORT_SYMBOL(omap_set_dma_priority);
 217
/*
 * omap_set_dma_transfer_params - program element type, counts and sync mode
 * @lch:	logical channel
 * @data_type:	element width written to CSDP bits 1:0
 * @elem_count:	elements per frame (CEN)
 * @frame_count: frames per block (CFN)
 * @sync_mode:	OMAP_DMA_SYNC_* element/frame/block synchronisation
 * @dma_trigger: hardware DMA request line (programmed on OMAP2+ only)
 * @src_or_dst_synch: source/destination/prefetch synchronisation select
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* Element width lives in CSDP bits 1:0 */
	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	if (dma_omap1()) {
		u16 ccr;

		/* Frame sync select: CCR bit 5 */
		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		/* Block sync select: CCR2 bit 2 */
		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (dma_omap2plus() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* Request line: low 5 bits plus upper bits shifted to 20:19 */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	/* Element count per frame and frame count per block */
	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
 280
 281void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
 282{
 283        if (dma_omap2plus()) {
 284                u32 csdp;
 285
 286                csdp = p->dma_read(CSDP, lch);
 287                csdp &= ~(0x3 << 16);
 288                csdp |= (mode << 16);
 289                p->dma_write(csdp, CSDP, lch);
 290        }
 291}
 292EXPORT_SYMBOL(omap_set_dma_write_mode);
 293
 294void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
 295{
 296        if (dma_omap1() && !dma_omap15xx()) {
 297                u32 l;
 298
 299                l = p->dma_read(LCH_CTRL, lch);
 300                l &= ~0x7;
 301                l |= mode;
 302                p->dma_write(l, LCH_CTRL, lch);
 303        }
 304}
 305EXPORT_SYMBOL(omap_set_dma_channel_mode);
 306
 307/* Note that src_port is only for omap1 */
 308void omap_set_dma_src_params(int lch, int src_port, int src_amode,
 309                             unsigned long src_start,
 310                             int src_ei, int src_fi)
 311{
 312        u32 l;
 313
 314        if (dma_omap1()) {
 315                u16 w;
 316
 317                w = p->dma_read(CSDP, lch);
 318                w &= ~(0x1f << 2);
 319                w |= src_port << 2;
 320                p->dma_write(w, CSDP, lch);
 321        }
 322
 323        l = p->dma_read(CCR, lch);
 324        l &= ~(0x03 << 12);
 325        l |= src_amode << 12;
 326        p->dma_write(l, CCR, lch);
 327
 328        p->dma_write(src_start, CSSA, lch);
 329
 330        p->dma_write(src_ei, CSEI, lch);
 331        p->dma_write(src_fi, CSFI, lch);
 332}
 333EXPORT_SYMBOL(omap_set_dma_src_params);
 334
/*
 * omap_set_dma_params - program a full channel configuration in one call
 * @lch:	logical channel
 * @params:	aggregated transfer, source, destination and priority
 *		settings; priorities are only written when at least one of
 *		read_prio/write_prio is non-zero
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
 353
 354void omap_set_dma_src_data_pack(int lch, int enable)
 355{
 356        u32 l;
 357
 358        l = p->dma_read(CSDP, lch);
 359        l &= ~(1 << 6);
 360        if (enable)
 361                l |= (1 << 6);
 362        p->dma_write(l, CSDP, lch);
 363}
 364EXPORT_SYMBOL(omap_set_dma_src_data_pack);
 365
 366void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 367{
 368        unsigned int burst = 0;
 369        u32 l;
 370
 371        l = p->dma_read(CSDP, lch);
 372        l &= ~(0x03 << 7);
 373
 374        switch (burst_mode) {
 375        case OMAP_DMA_DATA_BURST_DIS:
 376                break;
 377        case OMAP_DMA_DATA_BURST_4:
 378                if (dma_omap2plus())
 379                        burst = 0x1;
 380                else
 381                        burst = 0x2;
 382                break;
 383        case OMAP_DMA_DATA_BURST_8:
 384                if (dma_omap2plus()) {
 385                        burst = 0x2;
 386                        break;
 387                }
 388                /*
 389                 * not supported by current hardware on OMAP1
 390                 * w |= (0x03 << 7);
 391                 */
 392                /* fall through */
 393        case OMAP_DMA_DATA_BURST_16:
 394                if (dma_omap2plus()) {
 395                        burst = 0x3;
 396                        break;
 397                }
 398                /* OMAP1 don't support burst 16 */
 399                /* fall through */
 400        default:
 401                BUG();
 402        }
 403
 404        l |= (burst << 7);
 405        p->dma_write(l, CSDP, lch);
 406}
 407EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
 408
 409/* Note that dest_port is only for OMAP1 */
 410void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
 411                              unsigned long dest_start,
 412                              int dst_ei, int dst_fi)
 413{
 414        u32 l;
 415
 416        if (dma_omap1()) {
 417                l = p->dma_read(CSDP, lch);
 418                l &= ~(0x1f << 9);
 419                l |= dest_port << 9;
 420                p->dma_write(l, CSDP, lch);
 421        }
 422
 423        l = p->dma_read(CCR, lch);
 424        l &= ~(0x03 << 14);
 425        l |= dest_amode << 14;
 426        p->dma_write(l, CCR, lch);
 427
 428        p->dma_write(dest_start, CDSA, lch);
 429
 430        p->dma_write(dst_ei, CDEI, lch);
 431        p->dma_write(dst_fi, CDFI, lch);
 432}
 433EXPORT_SYMBOL(omap_set_dma_dest_params);
 434
 435void omap_set_dma_dest_data_pack(int lch, int enable)
 436{
 437        u32 l;
 438
 439        l = p->dma_read(CSDP, lch);
 440        l &= ~(1 << 13);
 441        if (enable)
 442                l |= 1 << 13;
 443        p->dma_write(l, CSDP, lch);
 444}
 445EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
 446
 447void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 448{
 449        unsigned int burst = 0;
 450        u32 l;
 451
 452        l = p->dma_read(CSDP, lch);
 453        l &= ~(0x03 << 14);
 454
 455        switch (burst_mode) {
 456        case OMAP_DMA_DATA_BURST_DIS:
 457                break;
 458        case OMAP_DMA_DATA_BURST_4:
 459                if (dma_omap2plus())
 460                        burst = 0x1;
 461                else
 462                        burst = 0x2;
 463                break;
 464        case OMAP_DMA_DATA_BURST_8:
 465                if (dma_omap2plus())
 466                        burst = 0x2;
 467                else
 468                        burst = 0x3;
 469                break;
 470        case OMAP_DMA_DATA_BURST_16:
 471                if (dma_omap2plus()) {
 472                        burst = 0x3;
 473                        break;
 474                }
 475                /* OMAP1 don't support burst 16 */
 476                /* fall through */
 477        default:
 478                printk(KERN_ERR "Invalid DMA burst mode\n");
 479                BUG();
 480                return;
 481        }
 482        l |= (burst << 14);
 483        p->dma_write(l, CSDP, lch);
 484}
 485EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
 486
/*
 * omap_enable_channel_irq - push the channel's software IRQ mask to CICR
 *
 * Pending status is cleared first: on OMAP1 CSR is cleared by reading it,
 * on OMAP2+ by writing the status bits back.
 */
static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
 498
/*
 * omap_disable_channel_irq - mask all channel interrupts and clear status
 *
 * CICR is zeroed before CSR is cleared so no new status can be raised
 * in between.
 */
static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);
	/* Clear CSR */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}
 509
/*
 * omap_enable_dma_irq - add @bits to the IRQs delivered for @lch
 *
 * Only updates the software mask; it reaches the CICR register the next
 * time omap_enable_channel_irq() runs for this channel.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);
 515
/*
 * omap_disable_dma_irq - remove @bits from the IRQs delivered for @lch
 *
 * Only updates the software mask; it reaches the CICR register the next
 * time omap_enable_channel_irq() runs for this channel.
 */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
 521
/*
 * enable_lnk - program channel linking for @lch in CLNK_CTRL
 *
 * On OMAP1 the STOP_LNK bit (14) is cleared first.  When a successor
 * channel is configured, its number is written together with ENABLE_LNK
 * (bit 15); on OMAP2+ a chained successor (next_linked_ch) overrides a
 * plain link (next_lch).
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	if (dma_omap1())
		l &= ~(1 << 14);	/* clear STOP_LNK */

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	if (dma_omap2plus())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	p->dma_write(l, CLNK_CTRL, lch);
}
 543
/*
 * disable_lnk - break channel linking for @lch and mark it inactive
 *
 * Channel interrupts are masked before the link is torn down.  OMAP1
 * stops the link by setting STOP_LNK (bit 14); OMAP2+ by clearing
 * ENABLE_LNK (bit 15).
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	if (dma_omap1()) {
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (dma_omap2plus()) {
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
 566
/*
 * omap2_enable_irq_lch - unmask @lch on interrupt line 0 (no-op on OMAP1)
 *
 * Any stale status bit is cleared before enabling so an old event cannot
 * fire immediately; the IRQENABLE_L0 read-modify-write is serialized by
 * dma_chan_lock.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (dma_omap1())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	/* Enable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val |= 1 << lch;
	p->dma_write(val, IRQENABLE_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
 584
/*
 * omap2_disable_irq_lch - mask @lch on interrupt line 0 (no-op on OMAP1)
 *
 * The enable bit is cleared before the status bit, so a late event
 * cannot re-raise the line; serialized by dma_chan_lock.
 */
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;
	unsigned long flags;

	if (dma_omap1())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	/* Disable interrupt */
	val = p->dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	p->dma_write(val, IRQENABLE_L0, lch);
	/* clear IRQ STATUS */
	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
 602
/*
 * omap_request_dma - allocate a free logical DMA channel
 * @dev_id:	hardware request line to bind (0 = software-triggered)
 * @dev_name:	owner name; anything except "DMA engine" triggers a
 *		deprecation warning
 * @callback:	completion/error callback, invoked with the channel status
 * @data:	opaque cookie passed back to @callback
 * @dma_ch_out:	on success, receives the allocated channel number
 *
 * Returns 0 on success or -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* Exit after first free channel found */
			break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	/* Writing dev_id under the lock reserves the slot */
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	if (dma_omap2plus())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (dma_omap2plus()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default IRQ mask; extended per SoC family below */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (dma_omap2plus())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (dma_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (dma_omap1()) {
		p->dma_write(dev_id, CCR, free_ch);
	}

	if (dma_omap2plus()) {
		omap_enable_channel_irq(free_ch);
		omap2_enable_irq_lch(free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
 681
/*
 * omap_free_dma - release a channel obtained from omap_request_dma()
 *
 * Masks interrupts, stops any transfer in progress (CCR = 0) and marks
 * the channel slot free again.  Complains and bails out when the channel
 * was never allocated.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable interrupt for logical channel */
	if (dma_omap2plus())
		omap2_disable_irq_lch(lch);

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	/* Clear registers */
	if (dma_omap2plus())
		omap_clear_dma(lch);

	/* Setting dev_id back to -1 makes the slot allocatable again */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
 713
 714/**
 715 * @brief omap_dma_set_global_params : Set global priority settings for dma
 716 *
 717 * @param arb_rate
 718 * @param max_fifo_depth
 719 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
 720 *                                                 DMA_THREAD_RESERVE_ONET
 721 *                                                 DMA_THREAD_RESERVE_TWOT
 722 *                                                 DMA_THREAD_RESERVE_THREET
 723 */
 724void
 725omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
 726{
 727        u32 reg;
 728
 729        if (dma_omap1()) {
 730                printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
 731                return;
 732        }
 733
 734        if (max_fifo_depth == 0)
 735                max_fifo_depth = 1;
 736        if (arb_rate == 0)
 737                arb_rate = 1;
 738
 739        reg = 0xff & max_fifo_depth;
 740        reg |= (0x3 & tparams) << 12;
 741        reg |= (arb_rate & 0xff) << 16;
 742
 743        p->dma_write(reg, GCR, 0);
 744}
 745EXPORT_SYMBOL(omap_dma_set_global_params);
 746
 747/**
 748 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 749 *
 750 * @param lch
 751 * @param read_prio - Read priority
 752 * @param write_prio - Write priority
 753 * Both of the above can be set with one of the following values :
 754 *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 755 */
 756static int
 757omap_dma_set_prio_lch(int lch, unsigned char read_prio,
 758                      unsigned char write_prio)
 759{
 760        u32 l;
 761
 762        if (unlikely((lch < 0 || lch >= dma_lch_count))) {
 763                printk(KERN_ERR "Invalid channel id\n");
 764                return -EINVAL;
 765        }
 766        l = p->dma_read(CCR, lch);
 767        l &= ~((1 << 6) | (1 << 26));
 768        if (d->dev_caps & IS_RW_PRIORITY)
 769                l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
 770        else
 771                l |= ((read_prio & 0x1) << 6);
 772
 773        p->dma_write(l, CCR, lch);
 774
 775        return 0;
 776}
 777
 778
 779/*
 780 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 781 * through omap_start_dma(). Any buffers in flight are discarded.
 782 */
static void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* IRQs off so the channel's interrupt path cannot run mid-clear */
	local_irq_save(flags);
	p->clear_dma(lch);	/* platform hook clears the channel state */
	local_irq_restore(flags);
}
 791
/*
 * omap_start_dma - start a (possibly linked) transfer on @lch
 *
 * Programs the link registers of every channel reachable through
 * next_lch (guarding against cycles), enables the channel interrupts
 * and finally sets OMAP_DMA_CCR_EN.  A memory barrier before the final
 * CCR write makes coherent-memory buffers visible to the DMA device.
 */
void omap_start_dma(int lch)
{
	u32 l;

	/*
	 * The CPC/CDAC register needs to be initialized to zero
	 * before starting dma transfer.
	 */
	if (dma_omap15xx())
		p->dma_write(0, CPC, lch);
	else
		p->dma_write(0, CDAC, lch);

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Set the link register of the first channel */
		enable_lnk(lch);

		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
		/* Errata workaround: write the channel's own number */
		p->dma_write(lch, CLNK_CTRL, lch);

	omap_enable_channel_irq(lch);

	l = p->dma_read(CCR, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	l |= OMAP_DMA_CCR_EN;

	/*
	 * As dma_write() uses IO accessors which are weakly ordered, there
	 * is no guarantee that data in coherent DMA memory will be visible
	 * to the DMA device.  Add a memory barrier here to ensure that any
	 * such data is visible prior to enabling DMA.
	 */
	mb();
	p->dma_write(l, CCR, lch);

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_start_dma);
 853
/*
 * omap_stop_dma - stop the transfer on @lch and tear down any links
 *
 * For erratum i541 the sDMA FIFO must drain before the channel is truly
 * idle, which requires temporarily forcing the DMA block out of standby
 * (MIDLEMODE = no-idle) while polling CCR's RD/WR_ACTIVE bits.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		/*
		 * NOTE(review): OCP_SYSCONFIG is accessed with channel 0
		 * here but with lch above/below; presumably the channel
		 * argument is ignored for this global register -- confirm.
		 */
		p->dma_write(l , OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled.  This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Walk the link chain, guarding against cycles */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
 924
 925/*
 926 * Allows changing the DMA callback function or data. This may be needed if
 927 * the driver shares a single DMA channel for multiple dma triggers.
 928 */
 929int omap_set_dma_callback(int lch,
 930                          void (*callback)(int lch, u16 ch_status, void *data),
 931                          void *data)
 932{
 933        unsigned long flags;
 934
 935        if (lch < 0)
 936                return -ENODEV;
 937
 938        spin_lock_irqsave(&dma_chan_lock, flags);
 939        if (dma_chan[lch].dev_id == -1) {
 940                printk(KERN_ERR "DMA callback for not set for free channel\n");
 941                spin_unlock_irqrestore(&dma_chan_lock, flags);
 942                return -EINVAL;
 943        }
 944        dma_chan[lch].callback = callback;
 945        dma_chan[lch].data = data;
 946        spin_unlock_irqrestore(&dma_chan_lock, flags);
 947
 948        return 0;
 949}
 950EXPORT_SYMBOL(omap_set_dma_callback);
 951
 952/*
 953 * Returns current physical source address for the given DMA channel.
 954 * If the channel is running the caller must disable interrupts prior calling
 955 * this function and process the returned value before re-enabling interrupt to
 956 * prevent races with the interrupt handler. Note that in continuous mode there
 957 * is a chance for CSSA_L register overflow between the two reads resulting
 958 * in incorrect return value.
 959 */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	/* OMAP15xx exposes the current position via CPC; later SoCs via CSAC */
	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	/*
	 * Erratum 3.3 workaround: if the first CSAC read came back as 0,
	 * read the register once more to get a usable value.
	 */
	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!dma_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	/*
	 * On OMAP1 only the low 16 bits come from the position register;
	 * merge in the high half from the programmed source address.
	 */
	if (dma_omap1())
		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
 989EXPORT_SYMBOL(omap_get_dma_src_pos);
 990
 991/*
 992 * Returns current physical destination address for the given DMA channel.
 993 * If the channel is running the caller must disable interrupts prior calling
 994 * this function and process the returned value before re-enabling interrupt to
 995 * prevent races with the interrupt handler. Note that in continuous mode there
 996 * is a chance for CDSA_L register overflow between the two reads resulting
 997 * in incorrect return value.
 998 */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	/* OMAP15xx exposes the current position via CPC; later SoCs via CDAC */
	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!dma_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	/*
	 * On OMAP1 only the low 16 bits come from the position register;
	 * merge in the high half from the programmed destination address.
	 */
	if (dma_omap1())
		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
1028EXPORT_SYMBOL(omap_get_dma_dst_pos);
1029
1030int omap_get_dma_active_status(int lch)
1031{
1032        return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1033}
1034EXPORT_SYMBOL(omap_get_dma_active_status);
1035
1036int omap_dma_running(void)
1037{
1038        int lch;
1039
1040        if (dma_omap1())
1041                if (omap_lcd_dma_running())
1042                        return 1;
1043
1044        for (lch = 0; lch < dma_chan_count; lch++)
1045                if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1046                        return 1;
1047
1048        return 0;
1049}
1050
1051/*
1052 * lch_queue DMA will start right after lch_head one is finished.
1053 * For this DMA link to start, you still need to start (see omap_start_dma)
1054 * the first one. That will fire up the entire queue.
1055 */
1056void omap_dma_link_lch(int lch_head, int lch_queue)
1057{
1058        if (omap_dma_in_1510_mode()) {
1059                if (lch_head == lch_queue) {
1060                        p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1061                                                                CCR, lch_head);
1062                        return;
1063                }
1064                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1065                BUG();
1066                return;
1067        }
1068
1069        if ((dma_chan[lch_head].dev_id == -1) ||
1070            (dma_chan[lch_queue].dev_id == -1)) {
1071                pr_err("omap_dma: trying to link non requested channels\n");
1072                dump_stack();
1073        }
1074
1075        dma_chan[lch_head].next_lch = lch_queue;
1076}
1077EXPORT_SYMBOL(omap_dma_link_lch);
1078
1079/*----------------------------------------------------------------------------*/
1080
1081#ifdef CONFIG_ARCH_OMAP1
1082
/*
 * Handle pending events on one OMAP1 logical channel.  Returns 1 if any
 * event was processed, 0 if there was nothing to do.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/*
	 * In 1510 mode, status for channels >= 6 is not read from CSR
	 * directly: it was stashed in saved_csr by an earlier pass over
	 * the paired low channel (see the csr >> 7 handling below).
	 */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	/*
	 * In 1510 mode the CSR of channels 0-2 also carries the status of
	 * channel ch+6 in its upper bits: save that part for later and
	 * keep only the low 7 bits for this channel.
	 */
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* No event bits set: nothing to handle on this channel */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	/* A block-complete event means the channel is no longer active */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1115
1116static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1117{
1118        int ch = ((int) dev_id) - 1;
1119        int handled = 0;
1120
1121        for (;;) {
1122                int handled_now = 0;
1123
1124                handled_now += omap1_dma_handle_ch(ch);
1125                if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1126                        handled_now += omap1_dma_handle_ch(ch + 6);
1127                if (!handled_now)
1128                        break;
1129                handled += handled_now;
1130        }
1131
1132        return handled ? IRQ_HANDLED : IRQ_NONE;
1133}
1134
1135#else
1136#define omap1_dma_irq_handler   NULL
1137#endif
1138
1139#ifdef CONFIG_ARCH_OMAP2PLUS
1140
/*
 * Handle pending events on one OMAP2+ logical channel: log errors,
 * acknowledge the status, advance any channel chain and invoke the
 * client callback.  Always returns 0.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = p->dma_read(CSR, ch);

	/* Nothing pending: ack the L0 line anyway and treat as spurious */
	if (!status) {
		if (printk_ratelimit())
			pr_warn("Spurious DMA IRQ for lch %d\n", ch);
		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
				status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		pr_info("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		/* Erratum i378 workaround: stop the channel after the error */
		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
			u32 ccr;

			ccr = p->dma_read(CCR, ch);
			ccr &= ~OMAP_DMA_CCR_EN;
			p->dma_write(ccr, CCR, ch);
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Acknowledge the channel status and the L0 interrupt line */
	p->dma_write(status, CSR, ch);
	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
	/* read back the register to flush the write */
	p->dma_read(IRQSTATUS_L0, ch);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/*
		 * CLNK_CTRL bit 15 set is taken to mean the link is active,
		 * i.e. the hardware already started the next linked channel.
		 */
		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Clear any status that was raised while handling the chain */
		status = p->dma_read(CSR, ch);
		p->dma_write(status, CSR, ch);
	}

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
1207
1208/* STATUS register count is from 1-32 while our is 0-31 */
1209static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1210{
1211        u32 val, enable_reg;
1212        int i;
1213
1214        val = p->dma_read(IRQSTATUS_L0, 0);
1215        if (val == 0) {
1216                if (printk_ratelimit())
1217                        printk(KERN_WARNING "Spurious DMA IRQ\n");
1218                return IRQ_HANDLED;
1219        }
1220        enable_reg = p->dma_read(IRQENABLE_L0, 0);
1221        val &= enable_reg; /* Dispatch only relevant interrupts */
1222        for (i = 0; i < dma_lch_count && val != 0; i++) {
1223                if (val & 1)
1224                        omap2_dma_handle_ch(i);
1225                val >>= 1;
1226        }
1227
1228        return IRQ_HANDLED;
1229}
1230
/* irqaction for the single shared OMAP2+ SDMA interrupt line */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
};
1235
1236#else
1237static struct irqaction omap24xx_dma_irq;
1238#endif
1239
1240/*----------------------------------------------------------------------------*/
1241
1242/*
1243 * Note that we are currently using only IRQENABLE_L0 and L1.
1244 * As the DSP may be using IRQENABLE_L2 and L3, let's not
1245 * touch those for now.
1246 */
void omap_dma_global_context_save(void)
{
	/* Snapshot the L0/L1 interrupt enables, OCP_SYSCONFIG and GCR */
	omap_dma_global_context.dma_irqenable_l0 =
		p->dma_read(IRQENABLE_L0, 0);
	omap_dma_global_context.dma_irqenable_l1 =
		p->dma_read(IRQENABLE_L1, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		p->dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
}
1257
void omap_dma_global_context_restore(void)
{
	int ch;

	/* Restore global configuration before re-enabling interrupt lines */
	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
		OCP_SYSCONFIG, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
		IRQENABLE_L0, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
		IRQENABLE_L1, 0);

	/* Clear stale L0 status bits 0-1 left behind by the ROM code bug */
	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
		p->dma_write(0x3 , IRQSTATUS_L0, 0);

	/* Reset every channel that is currently allocated to a client */
	for (ch = 0; ch < dma_chan_count; ch++)
		if (dma_chan[ch].dev_id != -1)
			omap_clear_dma(ch);
}
1277
/* Expose the platform-provided SDMA accessors/attributes to other code */
struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
	return p;
}
1282EXPORT_SYMBOL_GPL(omap_get_plat_info);
1283
1284static int omap_system_dma_probe(struct platform_device *pdev)
1285{
1286        int ch, ret = 0;
1287        int dma_irq;
1288        char irq_name[4];
1289        int irq_rel;
1290
1291        p = pdev->dev.platform_data;
1292        if (!p) {
1293                dev_err(&pdev->dev,
1294                        "%s: System DMA initialized without platform data\n",
1295                        __func__);
1296                return -EINVAL;
1297        }
1298
1299        d                       = p->dma_attr;
1300        errata                  = p->errata;
1301
1302        if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
1303                        && (omap_dma_reserve_channels < d->lch_count))
1304                d->lch_count    = omap_dma_reserve_channels;
1305
1306        dma_lch_count           = d->lch_count;
1307        dma_chan_count          = dma_lch_count;
1308        enable_1510_mode        = d->dev_caps & ENABLE_1510_MODE;
1309
1310        dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
1311                                sizeof(*dma_chan), GFP_KERNEL);
1312        if (!dma_chan)
1313                return -ENOMEM;
1314
1315        if (dma_omap2plus()) {
1316                dma_linked_lch = kcalloc(dma_lch_count,
1317                                         sizeof(*dma_linked_lch),
1318                                         GFP_KERNEL);
1319                if (!dma_linked_lch) {
1320                        ret = -ENOMEM;
1321                        goto exit_dma_lch_fail;
1322                }
1323        }
1324
1325        spin_lock_init(&dma_chan_lock);
1326        for (ch = 0; ch < dma_chan_count; ch++) {
1327                omap_clear_dma(ch);
1328                if (dma_omap2plus())
1329                        omap2_disable_irq_lch(ch);
1330
1331                dma_chan[ch].dev_id = -1;
1332                dma_chan[ch].next_lch = -1;
1333
1334                if (ch >= 6 && enable_1510_mode)
1335                        continue;
1336
1337                if (dma_omap1()) {
1338                        /*
1339                         * request_irq() doesn't like dev_id (ie. ch) being
1340                         * zero, so we have to kludge around this.
1341                         */
1342                        sprintf(&irq_name[0], "%d", ch);
1343                        dma_irq = platform_get_irq_byname(pdev, irq_name);
1344
1345                        if (dma_irq < 0) {
1346                                ret = dma_irq;
1347                                goto exit_dma_irq_fail;
1348                        }
1349
1350                        /* INT_DMA_LCD is handled in lcd_dma.c */
1351                        if (dma_irq == INT_DMA_LCD)
1352                                continue;
1353
1354                        ret = request_irq(dma_irq,
1355                                        omap1_dma_irq_handler, 0, "DMA",
1356                                        (void *) (ch + 1));
1357                        if (ret != 0)
1358                                goto exit_dma_irq_fail;
1359                }
1360        }
1361
1362        if (d->dev_caps & IS_RW_PRIORITY)
1363                omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
1364                                DMA_DEFAULT_FIFO_DEPTH, 0);
1365
1366        if (dma_omap2plus() && !(d->dev_caps & DMA_ENGINE_HANDLE_IRQ)) {
1367                strcpy(irq_name, "0");
1368                dma_irq = platform_get_irq_byname(pdev, irq_name);
1369                if (dma_irq < 0) {
1370                        dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
1371                        ret = dma_irq;
1372                        goto exit_dma_lch_fail;
1373                }
1374                ret = setup_irq(dma_irq, &omap24xx_dma_irq);
1375                if (ret) {
1376                        dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
1377                                dma_irq, ret);
1378                        goto exit_dma_lch_fail;
1379                }
1380        }
1381
1382        /* reserve dma channels 0 and 1 in high security devices on 34xx */
1383        if (d->dev_caps & HS_CHANNELS_RESERVED) {
1384                pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
1385                dma_chan[0].dev_id = 0;
1386                dma_chan[1].dev_id = 1;
1387        }
1388        p->show_dma_caps();
1389        return 0;
1390
1391exit_dma_irq_fail:
1392        dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
1393                dma_irq, ret);
1394        for (irq_rel = 0; irq_rel < ch; irq_rel++) {
1395                dma_irq = platform_get_irq(pdev, irq_rel);
1396                free_irq(dma_irq, (void *)(irq_rel + 1));
1397        }
1398
1399exit_dma_lch_fail:
1400        return ret;
1401}
1402
1403static int omap_system_dma_remove(struct platform_device *pdev)
1404{
1405        int dma_irq;
1406
1407        if (dma_omap2plus()) {
1408                char irq_name[4];
1409                strcpy(irq_name, "0");
1410                dma_irq = platform_get_irq_byname(pdev, irq_name);
1411                if (dma_irq >= 0)
1412                        remove_irq(dma_irq, &omap24xx_dma_irq);
1413        } else {
1414                int irq_rel = 0;
1415                for ( ; irq_rel < dma_chan_count; irq_rel++) {
1416                        dma_irq = platform_get_irq(pdev, irq_rel);
1417                        free_irq(dma_irq, (void *)(irq_rel + 1));
1418                }
1419        }
1420        return 0;
1421}
1422
/* Platform driver glue; the "omap_dma_system" device comes from SoC code */
static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove		= omap_system_dma_remove,
	.driver		= {
		.name	= "omap_dma_system"
	},
};
1430
/*
 * Registered at arch_initcall time — presumably so the DMA channels are
 * available before client drivers probe; confirm against SoC init order.
 */
static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);
1436
/* Module unload path: unregister the platform driver */
static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}
1441
1442MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
1443MODULE_LICENSE("GPL");
1444MODULE_AUTHOR("Texas Instruments Inc");
1445
1446/*
1447 * Reserve the omap SDMA channels using cmdline bootarg
1448 * "omap_dma_reserve_ch=". The valid range is 1 to 32
1449 */
1450static int __init omap_dma_cmdline_reserve_ch(char *str)
1451{
1452        if (get_option(&str, &omap_dma_reserve_channels) != 1)
1453                omap_dma_reserve_channels = 0;
1454        return 1;
1455}
1456
1457__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
1458
1459
1460