   1/*
   2 * linux/arch/arm/plat-omap/dma.c
   3 *
   4 * Copyright (C) 2003 Nokia Corporation
   5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
   6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
   7 * Graphics DMA and LCD DMA graphics transformations
   8 * by Imre Deak <imre.deak@nokia.com>
   9 * OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc.
  10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
  11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
  12 *
  13 * Support functions for the OMAP internal DMA channels.
  14 *
  15 * This program is free software; you can redistribute it and/or modify
  16 * it under the terms of the GNU General Public License version 2 as
  17 * published by the Free Software Foundation.
  18 *
  19 */
  20
  21#include <linux/module.h>
  22#include <linux/init.h>
  23#include <linux/sched.h>
  24#include <linux/spinlock.h>
  25#include <linux/errno.h>
  26#include <linux/interrupt.h>
  27#include <linux/irq.h>
  28
  29#include <asm/system.h>
  30#include <asm/hardware.h>
  31#include <asm/dma.h>
  32#include <asm/io.h>
  33
  34#include <asm/arch/tc.h>
  35
  36#define DEBUG_PRINTS
  37#undef DEBUG_PRINTS
  38#ifdef DEBUG_PRINTS
  39#define debug_printk(x) printk x
  40#else
  41#define debug_printk(x)
  42#endif
  43
  44#define OMAP_DMA_ACTIVE         0x01
  45#define OMAP_DMA_CCR_EN         (1 << 7)
  46#define OMAP2_DMA_CSR_CLEAR_MASK        0xffe
  47
  48#define OMAP_FUNC_MUX_ARM_BASE  (0xfffe1000 + 0xec)
  49
  50static int enable_1510_mode = 0;
  51
  52struct omap_dma_lch {
  53        int next_lch;
  54        int dev_id;
  55        u16 saved_csr;
  56        u16 enabled_irqs;
  57        const char *dev_name;
  58        void (* callback)(int lch, u16 ch_status, void *data);
  59        void *data;
  60        long flags;
  61};
  62
  63static int dma_chan_count;
  64
  65static spinlock_t dma_chan_lock;
  66static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
  67
  68static const u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
  69        INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
  70        INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
  71        INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
  72        INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
  73        INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
  74};
  75
  76#define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
  77                                                __FUNCTION__);
  78
  79#ifdef CONFIG_ARCH_OMAP15XX
  80/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
  81int omap_dma_in_1510_mode(void)
  82{
  83        return enable_1510_mode;
  84}
  85#else
  86#define omap_dma_in_1510_mode()         0
  87#endif
  88
  89#ifdef CONFIG_ARCH_OMAP1
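/*
 * On OMAP1 the "global DMA" request lines are routed through the
 * FUNC_MUX_ARM registers: each 32-bit register packs five 6-bit device
 * fields, so request N is configured in register (N - 1) / 5 at bit
 * offset ((N - 1) % 5) * 6, and the field stores the device id minus one.
 */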
  90static inline int get_gdma_dev(int req)
  91{
  92        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
  93        int shift = ((req - 1) % 5) * 6;
  94
  95        return ((omap_readl(reg) >> shift) & 0x3f) + 1;
  96}
  97
  98static inline void set_gdma_dev(int req, int dev)
  99{
 100        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
 101        int shift = ((req - 1) % 5) * 6;
 102        u32 l;
 103
 104        l = omap_readl(reg);
 105        l &= ~(0x3f << shift);
 106        l |= (dev - 1) << shift;
 107        omap_writel(l, reg);
 108}
 109#else
 110#define set_gdma_dev(req, dev)  do {} while (0)
 111#endif
 112
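/*
 * Each OMAP1 logical channel owns a 0x40-byte register window; reset a
 * channel by zeroing the first 0x2c bytes of it with 16-bit writes.
 */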
 113static void clear_lch_regs(int lch)
 114{
 115        int i;
 116        u32 lch_base = OMAP_DMA_BASE + lch * 0x40;
 117
 118        for (i = 0; i < 0x2c; i += 2)
 119                omap_writew(0, lch_base + i);
 120}
 121
 122void omap_set_dma_priority(int lch, int dst_port, int priority)
 123{
 124        unsigned long reg;
 125        u32 l;
 126
 127        if (cpu_class_is_omap1()) {
 128                switch (dst_port) {
 129                case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
 130                        reg = OMAP_TC_OCPT1_PRIOR;
 131                        break;
 132                case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
 133                        reg = OMAP_TC_OCPT2_PRIOR;
 134                        break;
 135                case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
 136                        reg = OMAP_TC_EMIFF_PRIOR;
 137                        break;
 138                case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
 139                        reg = OMAP_TC_EMIFS_PRIOR;
 140                        break;
 141                default:
 142                        BUG();
 143                        return;
 144                }
 145                l = omap_readl(reg);
 146                l &= ~(0xf << 8);
 147                l |= (priority & 0xf) << 8;
 148                omap_writel(l, reg);
 149        }
 150
 151        if (cpu_is_omap24xx()) {
 152                if (priority)
 153                        OMAP_DMA_CCR_REG(lch) |= (1 << 6);
 154                else
 155                        OMAP_DMA_CCR_REG(lch) &= ~(1 << 6);
 156        }
 157}
 158
 159void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
 160                                  int frame_count, int sync_mode,
 161                                  int dma_trigger, int src_or_dst_synch)
 162{
 163        OMAP_DMA_CSDP_REG(lch) &= ~0x03;
 164        OMAP_DMA_CSDP_REG(lch) |= data_type;
 165
 166        if (cpu_class_is_omap1()) {
 167                OMAP_DMA_CCR_REG(lch) &= ~(1 << 5);
 168                if (sync_mode == OMAP_DMA_SYNC_FRAME)
 169                        OMAP_DMA_CCR_REG(lch) |= 1 << 5;
 170
 171                OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2);
 172                if (sync_mode == OMAP_DMA_SYNC_BLOCK)
 173                        OMAP1_DMA_CCR2_REG(lch) |= 1 << 2;
 174        }
 175
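        /*
         * On 24xx the DMA request (trigger) line is encoded in CCR:
         * bits 4:0 take the low five bits of the request number, bit 19
         * is set for requests above 31 and bit 20 for requests above 63.
         */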
 176        if (cpu_is_omap24xx() && dma_trigger) {
 177                u32 val = OMAP_DMA_CCR_REG(lch);
 178
 179                val &= ~(3 << 19);
 180                if (dma_trigger > 63)
 181                        val |= 1 << 20;
 182                if (dma_trigger > 31)
 183                        val |= 1 << 19;
 184
 185                val &= ~(0x1f);
 186                val |= (dma_trigger & 0x1f);
 187
 188                if (sync_mode & OMAP_DMA_SYNC_FRAME)
 189                        val |= 1 << 5;
 190                else
 191                        val &= ~(1 << 5);
 192
 193                if (sync_mode & OMAP_DMA_SYNC_BLOCK)
 194                        val |= 1 << 18;
 195                else
 196                        val &= ~(1 << 18);
 197
 198                if (src_or_dst_synch)
 199                        val |= 1 << 24;         /* source synch */
 200                else
 201                        val &= ~(1 << 24);      /* dest synch */
 202
 203                OMAP_DMA_CCR_REG(lch) = val;
 204        }
 205
 206        OMAP_DMA_CEN_REG(lch) = elem_count;
 207        OMAP_DMA_CFN_REG(lch) = frame_count;
 208}
 209
 210void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
 211{
 212        u16 w;
 213
 214        BUG_ON(omap_dma_in_1510_mode());
 215
 216        if (cpu_is_omap24xx()) {
 217                REVISIT_24XX();
 218                return;
 219        }
 220
 221        w = OMAP1_DMA_CCR2_REG(lch) & ~0x03;
 222        switch (mode) {
 223        case OMAP_DMA_CONSTANT_FILL:
 224                w |= 0x01;
 225                break;
 226        case OMAP_DMA_TRANSPARENT_COPY:
 227                w |= 0x02;
 228                break;
 229        case OMAP_DMA_COLOR_DIS:
 230                break;
 231        default:
 232                BUG();
 233        }
 234        OMAP1_DMA_CCR2_REG(lch) = w;
 235
 236        w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f;
 237        /* Default is channel type 2D */
 238        if (mode) {
 239                OMAP1_DMA_COLOR_L_REG(lch) = (u16)color;
 240                OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16);
 241                w |= 1;         /* Channel type G */
 242        }
 243        OMAP1_DMA_LCH_CTRL_REG(lch) = w;
 244}
 245
 246void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
 247{
 248        if (cpu_is_omap24xx()) {
 249                OMAP_DMA_CSDP_REG(lch) &= ~(0x3 << 16);
 250                OMAP_DMA_CSDP_REG(lch) |= (mode << 16);
 251        }
 252}
 253
 254/* Note that src_port is only for omap1 */
 255void omap_set_dma_src_params(int lch, int src_port, int src_amode,
 256                             unsigned long src_start,
 257                             int src_ei, int src_fi)
 258{
 259        if (cpu_class_is_omap1()) {
 260                OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2);
 261                OMAP_DMA_CSDP_REG(lch) |= src_port << 2;
 262        }
 263
 264        OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12);
 265        OMAP_DMA_CCR_REG(lch) |= src_amode << 12;
 266
 267        if (cpu_class_is_omap1()) {
 268                OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16;
 269                OMAP1_DMA_CSSA_L_REG(lch) = src_start;
 270        }
 271
 272        if (cpu_is_omap24xx())
 273                OMAP2_DMA_CSSA_REG(lch) = src_start;
 274
 275        OMAP_DMA_CSEI_REG(lch) = src_ei;
 276        OMAP_DMA_CSFI_REG(lch) = src_fi;
 277}
 278
 279void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
 280{
 281        omap_set_dma_transfer_params(lch, params->data_type,
 282                                     params->elem_count, params->frame_count,
 283                                     params->sync_mode, params->trigger,
 284                                     params->src_or_dst_synch);
 285        omap_set_dma_src_params(lch, params->src_port,
 286                                params->src_amode, params->src_start,
 287                                params->src_ei, params->src_fi);
 288
 289        omap_set_dma_dest_params(lch, params->dst_port,
 290                                 params->dst_amode, params->dst_start,
 291                                 params->dst_ei, params->dst_fi);
 292}
 293
 294void omap_set_dma_src_index(int lch, int eidx, int fidx)
 295{
 296        if (cpu_is_omap24xx()) {
 297                REVISIT_24XX();
 298                return;
 299        }
 300        OMAP_DMA_CSEI_REG(lch) = eidx;
 301        OMAP_DMA_CSFI_REG(lch) = fidx;
 302}
 303
 304void omap_set_dma_src_data_pack(int lch, int enable)
 305{
 306        OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6);
 307        if (enable)
 308                OMAP_DMA_CSDP_REG(lch) |= (1 << 6);
 309}
 310
 311void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 312{
 313        unsigned int burst = 0;
 314        OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
 315
 316        switch (burst_mode) {
 317        case OMAP_DMA_DATA_BURST_DIS:
 318                break;
 319        case OMAP_DMA_DATA_BURST_4:
 320                if (cpu_is_omap24xx())
 321                        burst = 0x1;
 322                else
 323                        burst = 0x2;
 324                break;
 325        case OMAP_DMA_DATA_BURST_8:
 326                if (cpu_is_omap24xx()) {
 327                        burst = 0x2;
 328                        break;
 329                }
 330                /* not supported by current hardware on OMAP1
 331                 * w |= (0x03 << 7);
 332                 * fall through
 333                 */
 334        case OMAP_DMA_DATA_BURST_16:
 335                if (cpu_is_omap24xx()) {
 336                        burst = 0x3;
 337                        break;
 338                }
  339                /* OMAP1 doesn't support burst 16
 340                 * fall through
 341                 */
 342        default:
 343                BUG();
 344        }
 345        OMAP_DMA_CSDP_REG(lch) |= (burst << 7);
 346}
 347
 348/* Note that dest_port is only for OMAP1 */
 349void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
 350                              unsigned long dest_start,
 351                              int dst_ei, int dst_fi)
 352{
 353        if (cpu_class_is_omap1()) {
 354                OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9);
 355                OMAP_DMA_CSDP_REG(lch) |= dest_port << 9;
 356        }
 357
 358        OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14);
 359        OMAP_DMA_CCR_REG(lch) |= dest_amode << 14;
 360
 361        if (cpu_class_is_omap1()) {
 362                OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16;
 363                OMAP1_DMA_CDSA_L_REG(lch) = dest_start;
 364        }
 365
 366        if (cpu_is_omap24xx())
 367                OMAP2_DMA_CDSA_REG(lch) = dest_start;
 368
 369        OMAP_DMA_CDEI_REG(lch) = dst_ei;
 370        OMAP_DMA_CDFI_REG(lch) = dst_fi;
 371}
 372
 373void omap_set_dma_dest_index(int lch, int eidx, int fidx)
 374{
 375        if (cpu_is_omap24xx()) {
 376                REVISIT_24XX();
 377                return;
 378        }
 379        OMAP_DMA_CDEI_REG(lch) = eidx;
 380        OMAP_DMA_CDFI_REG(lch) = fidx;
 381}
 382
 383void omap_set_dma_dest_data_pack(int lch, int enable)
 384{
 385        OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13);
 386        if (enable)
 387                OMAP_DMA_CSDP_REG(lch) |= 1 << 13;
 388}
 389
 390void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
 391{
 392        unsigned int burst = 0;
 393        OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
 394
 395        switch (burst_mode) {
 396        case OMAP_DMA_DATA_BURST_DIS:
 397                break;
 398        case OMAP_DMA_DATA_BURST_4:
 399                if (cpu_is_omap24xx())
 400                        burst = 0x1;
 401                else
 402                        burst = 0x2;
 403                break;
 404        case OMAP_DMA_DATA_BURST_8:
 405                if (cpu_is_omap24xx())
 406                        burst = 0x2;
 407                else
 408                        burst = 0x3;
 409                break;
 410        case OMAP_DMA_DATA_BURST_16:
 411                if (cpu_is_omap24xx()) {
 412                        burst = 0x3;
 413                        break;
 414                }
  415                /* OMAP1 doesn't support burst 16
 416                 * fall through
 417                 */
 418        default:
 419                printk(KERN_ERR "Invalid DMA burst mode\n");
 420                BUG();
 421                return;
 422        }
 423        OMAP_DMA_CSDP_REG(lch) |= (burst << 14);
 424}
 425
 426static inline void omap_enable_channel_irq(int lch)
 427{
 428        u32 status;
 429
 430        /* Clear CSR */
 431        if (cpu_class_is_omap1())
 432                status = OMAP_DMA_CSR_REG(lch);
 433        else if (cpu_is_omap24xx())
 434                OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
 435
 436        /* Enable some nice interrupts. */
 437        OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
 438
 439        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 440}
 441
 442static void omap_disable_channel_irq(int lch)
 443{
 444        if (cpu_is_omap24xx())
 445                OMAP_DMA_CICR_REG(lch) = 0;
 446}
 447
 448void omap_enable_dma_irq(int lch, u16 bits)
 449{
 450        dma_chan[lch].enabled_irqs |= bits;
 451}
 452
 453void omap_disable_dma_irq(int lch, u16 bits)
 454{
 455        dma_chan[lch].enabled_irqs &= ~bits;
 456}
 457
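/*
 * CLNK_CTRL holds the number of the next logical channel in its low
 * bits; bit 15 is ENABLE_LNK and, on OMAP1, bit 14 is STOP_LNK.
 */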
 458static inline void enable_lnk(int lch)
 459{
 460        if (cpu_class_is_omap1())
 461                OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14);
 462
 463        /* Set the ENABLE_LNK bits */
 464        if (dma_chan[lch].next_lch != -1)
 465                OMAP_DMA_CLNK_CTRL_REG(lch) =
 466                        dma_chan[lch].next_lch | (1 << 15);
 467}
 468
 469static inline void disable_lnk(int lch)
 470{
 471        /* Disable interrupts */
 472        if (cpu_class_is_omap1()) {
 473                OMAP_DMA_CICR_REG(lch) = 0;
 474                /* Set the STOP_LNK bit */
 475                OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14;
 476        }
 477
 478        if (cpu_is_omap24xx()) {
 479                omap_disable_channel_irq(lch);
 480                /* Clear the ENABLE_LNK bit */
 481                OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15);
 482        }
 483
 484        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 485}
 486
 487static inline void omap2_enable_irq_lch(int lch)
 488{
 489        u32 val;
 490
 491        if (!cpu_is_omap24xx())
 492                return;
 493
 494        val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
 495        val |= 1 << lch;
 496        omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
 497}
 498
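/*
 * Reserve a logical DMA channel. A client driver typically uses the API
 * roughly as sketched below (illustrative only; the device id, callback
 * and transfer parameters are whatever the caller needs):
 *
 *      int lch;
 *
 *      if (omap_request_dma(dev_id, "mydev", my_callback, my_data, &lch))
 *              return -EBUSY;
 *      omap_set_dma_transfer_params(lch, ...);
 *      omap_set_dma_src_params(lch, ...);
 *      omap_set_dma_dest_params(lch, ...);
 *      omap_start_dma(lch);
 *      ...
 *      omap_stop_dma(lch);
 *      omap_free_dma(lch);
 */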
 499int omap_request_dma(int dev_id, const char *dev_name,
 500                     void (* callback)(int lch, u16 ch_status, void *data),
 501                     void *data, int *dma_ch_out)
 502{
 503        int ch, free_ch = -1;
 504        unsigned long flags;
 505        struct omap_dma_lch *chan;
 506
 507        spin_lock_irqsave(&dma_chan_lock, flags);
 508        for (ch = 0; ch < dma_chan_count; ch++) {
 509                if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
 510                        free_ch = ch;
 511                        if (dev_id == 0)
 512                                break;
 513                }
 514        }
 515        if (free_ch == -1) {
 516                spin_unlock_irqrestore(&dma_chan_lock, flags);
 517                return -EBUSY;
 518        }
 519        chan = dma_chan + free_ch;
 520        chan->dev_id = dev_id;
 521
 522        if (cpu_class_is_omap1())
 523                clear_lch_regs(free_ch);
 524
 525        if (cpu_is_omap24xx())
 526                omap_clear_dma(free_ch);
 527
 528        spin_unlock_irqrestore(&dma_chan_lock, flags);
 529
 530        chan->dev_name = dev_name;
 531        chan->callback = callback;
 532        chan->data = data;
 533        chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
 534
 535        if (cpu_class_is_omap1())
 536                chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
 537        else if (cpu_is_omap24xx())
 538                chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
 539                        OMAP2_DMA_TRANS_ERR_IRQ;
 540
 541        if (cpu_is_omap16xx()) {
 542                /* If the sync device is set, configure it dynamically. */
 543                if (dev_id != 0) {
 544                        set_gdma_dev(free_ch + 1, dev_id);
 545                        dev_id = free_ch + 1;
 546                }
 547                /* Disable the 1510 compatibility mode and set the sync device
 548                 * id. */
 549                OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10);
 550        } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
 551                OMAP_DMA_CCR_REG(free_ch) = dev_id;
 552        }
 553
 554        if (cpu_is_omap24xx()) {
 555                omap2_enable_irq_lch(free_ch);
 556
 557                omap_enable_channel_irq(free_ch);
 558                /* Clear the CSR register and IRQ status register */
 559                OMAP_DMA_CSR_REG(free_ch) = OMAP2_DMA_CSR_CLEAR_MASK;
 560                omap_writel(1 << free_ch, OMAP_DMA4_IRQSTATUS_L0);
 561        }
 562
 563        *dma_ch_out = free_ch;
 564
 565        return 0;
 566}
 567
 568void omap_free_dma(int lch)
 569{
 570        unsigned long flags;
 571
 572        spin_lock_irqsave(&dma_chan_lock, flags);
 573        if (dma_chan[lch].dev_id == -1) {
  574                printk(KERN_ERR "omap_dma: trying to free "
  575                       "non-allocated DMA channel %d\n", lch);
 576                spin_unlock_irqrestore(&dma_chan_lock, flags);
 577                return;
 578        }
 579        dma_chan[lch].dev_id = -1;
 580        dma_chan[lch].next_lch = -1;
 581        dma_chan[lch].callback = NULL;
 582        spin_unlock_irqrestore(&dma_chan_lock, flags);
 583
 584        if (cpu_class_is_omap1()) {
 585                /* Disable all DMA interrupts for the channel. */
 586                OMAP_DMA_CICR_REG(lch) = 0;
 587                /* Make sure the DMA transfer is stopped. */
 588                OMAP_DMA_CCR_REG(lch) = 0;
 589        }
 590
 591        if (cpu_is_omap24xx()) {
 592                u32 val;
 593                /* Disable interrupts */
 594                val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
 595                val &= ~(1 << lch);
 596                omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
 597
 598                /* Clear the CSR register and IRQ status register */
 599                OMAP_DMA_CSR_REG(lch) = OMAP2_DMA_CSR_CLEAR_MASK;
 600                omap_writel(1 << lch, OMAP_DMA4_IRQSTATUS_L0);
 601
 602                /* Disable all DMA interrupts for the channel. */
 603                OMAP_DMA_CICR_REG(lch) = 0;
 604
 605                /* Make sure the DMA transfer is stopped. */
 606                OMAP_DMA_CCR_REG(lch) = 0;
 607                omap_clear_dma(lch);
 608        }
 609}
 610
 611/*
 612 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 613 * through omap_start_dma(). Any buffers in flight are discarded.
 614 */
 615void omap_clear_dma(int lch)
 616{
 617        unsigned long flags;
 618
 619        local_irq_save(flags);
 620
 621        if (cpu_class_is_omap1()) {
 622                int status;
 623                OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
 624
 625                /* Clear pending interrupts */
 626                status = OMAP_DMA_CSR_REG(lch);
 627        }
 628
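        /*
         * On 24xx the per-channel registers start 0x80 past the
         * controller base and the channels are spaced 0x60 apart; wipe
         * the first 0x44 bytes of this channel's window one 32-bit
         * register at a time.
         */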
 629        if (cpu_is_omap24xx()) {
 630                int i;
 631                u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80;
 632                for (i = 0; i < 0x44; i += 4)
 633                        omap_writel(0, lch_base + i);
 634        }
 635
 636        local_irq_restore(flags);
 637}
 638
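/*
 * Start a transfer on the given channel. If the channel heads a link
 * chain (and we are not in 1510 mode), walk the chain first and program
 * the link register and interrupts of every member; dma_chan_link_map
 * keeps the walk from looping forever over a circular chain.
 */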
 639void omap_start_dma(int lch)
 640{
 641        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 642                int next_lch, cur_lch;
 643                char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
 644
  645        /* Set the link register of the first channel */
  646        enable_lnk(lch);
  647
  648        memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
  649        dma_chan_link_map[lch] = 1;
  650        cur_lch = dma_chan[lch].next_lch;
 651                do {
 652                        next_lch = dma_chan[cur_lch].next_lch;
 653
 654                        /* The loop case: we've been here already */
 655                        if (dma_chan_link_map[cur_lch])
 656                                break;
 657                        /* Mark the current channel */
 658                        dma_chan_link_map[cur_lch] = 1;
 659
 660                        enable_lnk(cur_lch);
 661                        omap_enable_channel_irq(cur_lch);
 662
 663                        cur_lch = next_lch;
 664                } while (next_lch != -1);
 665        } else if (cpu_is_omap24xx()) {
 666                /* Errata: Need to write lch even if not using chaining */
 667                OMAP_DMA_CLNK_CTRL_REG(lch) = lch;
 668        }
 669
 670        omap_enable_channel_irq(lch);
 671
 672        /* Errata: On ES2.0 BUFFERING disable must be set.
 673         * This will always fail on ES1.0 */
 674        if (cpu_is_omap24xx()) {
 675                OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
 676        }
 677
 678        OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
 679
 680        dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 681}
 682
 683void omap_stop_dma(int lch)
 684{
 685        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 686                int next_lch, cur_lch = lch;
 687                char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
 688
 689                memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
 690                do {
 691                        /* The loop case: we've been here already */
 692                        if (dma_chan_link_map[cur_lch])
 693                                break;
 694                        /* Mark the current channel */
 695                        dma_chan_link_map[cur_lch] = 1;
 696
 697                        disable_lnk(cur_lch);
 698
 699                        next_lch = dma_chan[cur_lch].next_lch;
 700                        cur_lch = next_lch;
 701                } while (next_lch != -1);
 702
 703                return;
 704        }
 705
 706        /* Disable all interrupts on the channel */
 707        if (cpu_class_is_omap1())
 708                OMAP_DMA_CICR_REG(lch) = 0;
 709
 710        OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
 711        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 712}
 713
 714/*
 715 * Allows changing the DMA callback function or data. This may be needed if
 716 * the driver shares a single DMA channel for multiple dma triggers.
 717 */
 718int omap_set_dma_callback(int lch,
 719                          void (* callback)(int lch, u16 ch_status, void *data),
 720                          void *data)
 721{
 722        unsigned long flags;
 723
 724        if (lch < 0)
 725                return -ENODEV;
 726
 727        spin_lock_irqsave(&dma_chan_lock, flags);
 728        if (dma_chan[lch].dev_id == -1) {
  729                printk(KERN_ERR "DMA callback not set for free channel\n");
 730                spin_unlock_irqrestore(&dma_chan_lock, flags);
 731                return -EINVAL;
 732        }
 733        dma_chan[lch].callback = callback;
 734        dma_chan[lch].data = data;
 735        spin_unlock_irqrestore(&dma_chan_lock, flags);
 736
 737        return 0;
 738}
 739
 740/*
  741 * Returns the current physical source address for the given DMA channel.
  742 * If the channel is running, the caller must disable interrupts before
  743 * calling this function and process the returned value before re-enabling
  744 * them, in order to prevent races with the interrupt handler. Note that in
  745 * continuous mode there is a chance of the CSSA_L register overflowing in
  746 * between the two reads, resulting in an incorrect return value.
 747 */
 748dma_addr_t omap_get_dma_src_pos(int lch)
 749{
 750        dma_addr_t offset = 0;
 751
 752        if (cpu_class_is_omap1())
 753                offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) |
 754                                       (OMAP1_DMA_CSSA_U_REG(lch) << 16));
 755
 756        if (cpu_is_omap24xx())
 757                offset = OMAP_DMA_CSAC_REG(lch);
 758
 759        return offset;
 760}
 761
 762/*
  763 * Returns the current physical destination address for the given DMA channel.
  764 * If the channel is running, the caller must disable interrupts before
  765 * calling this function and process the returned value before re-enabling
  766 * them, in order to prevent races with the interrupt handler. Note that in
  767 * continuous mode there is a chance of the CDSA_L register overflowing in
  768 * between the two reads, resulting in an incorrect return value.
 769 */
 770dma_addr_t omap_get_dma_dst_pos(int lch)
 771{
 772        dma_addr_t offset = 0;
 773
 774        if (cpu_class_is_omap1())
 775                offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) |
 776                                       (OMAP1_DMA_CDSA_U_REG(lch) << 16));
 777
 778        if (cpu_is_omap24xx())
 779                offset = OMAP2_DMA_CDSA_REG(lch);
 780
 781        return offset;
 782}
 783
 784/*
  785 * Returns the current source transfer count for the given DMA channel.
  786 * Can be used to monitor the progress of a transfer inside a block.
  787 * It must be called with interrupts disabled.
 788 */
 789int omap_get_dma_src_addr_counter(int lch)
 790{
 791        return (dma_addr_t) OMAP_DMA_CSAC_REG(lch);
 792}
 793
 794int omap_dma_running(void)
 795{
 796        int lch;
 797
 798        /* Check if LCD DMA is running */
 799        if (cpu_is_omap16xx())
 800                if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
 801                        return 1;
 802
 803        for (lch = 0; lch < dma_chan_count; lch++)
 804                if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN)
 805                        return 1;
 806
 807        return 0;
 808}
 809
 810/*
  811 * The lch_queue DMA channel will start right after lch_head finishes.
  812 * For the link to take effect you still need to start the first channel
  813 * (see omap_start_dma); that fires up the entire queue.
 814 */
 815void omap_dma_link_lch (int lch_head, int lch_queue)
 816{
 817        if (omap_dma_in_1510_mode()) {
 818                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 819                BUG();
 820                return;
 821        }
 822
 823        if ((dma_chan[lch_head].dev_id == -1) ||
 824            (dma_chan[lch_queue].dev_id == -1)) {
 825                printk(KERN_ERR "omap_dma: trying to link "
  826                       "non-requested channels\n");
 827                dump_stack();
 828        }
 829
 830        dma_chan[lch_head].next_lch = lch_queue;
 831}
 832
 833/*
 834 * Once the DMA queue is stopped, we can destroy it.
 835 */
 836void omap_dma_unlink_lch (int lch_head, int lch_queue)
 837{
 838        if (omap_dma_in_1510_mode()) {
 839                printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
 840                BUG();
 841                return;
 842        }
 843
 844        if (dma_chan[lch_head].next_lch != lch_queue ||
 845            dma_chan[lch_head].next_lch == -1) {
 846                printk(KERN_ERR "omap_dma: trying to unlink "
  847                       "non-linked channels\n");
 848                dump_stack();
 849        }
 850
 851
 852        if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
  853            (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
 854                printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
 855                       "before unlinking\n");
 856                dump_stack();
 857        }
 858
 859        dma_chan[lch_head].next_lch = -1;
 860}
 861
 862/*----------------------------------------------------------------------------*/
 863
 864#ifdef CONFIG_ARCH_OMAP1
 865
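/*
 * In 1510 mode logical channels 6-8 have no interrupt or status register
 * of their own: their status arrives in the upper bits of the CSR of
 * channels 0-2, gets stashed in saved_csr, and is then handled as if it
 * had come in on the higher channel.
 */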
 866static int omap1_dma_handle_ch(int ch)
 867{
 868        u16 csr;
 869
 870        if (enable_1510_mode && ch >= 6) {
 871                csr = dma_chan[ch].saved_csr;
 872                dma_chan[ch].saved_csr = 0;
 873        } else
 874                csr = OMAP_DMA_CSR_REG(ch);
 875        if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
 876                dma_chan[ch + 6].saved_csr = csr >> 7;
 877                csr &= 0x7f;
 878        }
 879        if ((csr & 0x3f) == 0)
 880                return 0;
 881        if (unlikely(dma_chan[ch].dev_id == -1)) {
 882                printk(KERN_WARNING "Spurious interrupt from DMA channel "
 883                       "%d (CSR %04x)\n", ch, csr);
 884                return 0;
 885        }
 886        if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
 887                printk(KERN_WARNING "DMA timeout with device %d\n",
 888                       dma_chan[ch].dev_id);
 889        if (unlikely(csr & OMAP_DMA_DROP_IRQ))
 890                printk(KERN_WARNING "DMA synchronization event drop occurred "
 891                       "with device %d\n", dma_chan[ch].dev_id);
 892        if (likely(csr & OMAP_DMA_BLOCK_IRQ))
 893                dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
 894        if (likely(dma_chan[ch].callback != NULL))
 895                dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
 896        return 1;
 897}
 898
 899static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
 900{
 901        int ch = ((int) dev_id) - 1;
 902        int handled = 0;
 903
 904        for (;;) {
 905                int handled_now = 0;
 906
 907                handled_now += omap1_dma_handle_ch(ch);
 908                if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
 909                        handled_now += omap1_dma_handle_ch(ch + 6);
 910                if (!handled_now)
 911                        break;
 912                handled += handled_now;
 913        }
 914
 915        return handled ? IRQ_HANDLED : IRQ_NONE;
 916}
 917
 918#else
 919#define omap1_dma_irq_handler   NULL
 920#endif
 921
 922#ifdef CONFIG_ARCH_OMAP2
 923
 924static int omap2_dma_handle_ch(int ch)
 925{
 926        u32 status = OMAP_DMA_CSR_REG(ch);
 927
 928        if (!status) {
 929                if (printk_ratelimit())
 930                        printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n", ch);
 931                return 0;
 932        }
 933        if (unlikely(dma_chan[ch].dev_id == -1)) {
 934                if (printk_ratelimit())
  935                        printk(KERN_WARNING "IRQ %04x for non-allocated DMA "
 936                                        "channel %d\n", status, ch);
 937                return 0;
 938        }
 939        if (unlikely(status & OMAP_DMA_DROP_IRQ))
 940                printk(KERN_INFO
 941                       "DMA synchronization event drop occurred with device "
 942                       "%d\n", dma_chan[ch].dev_id);
 943        if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
 944                printk(KERN_INFO "DMA transaction error with device %d\n",
 945                       dma_chan[ch].dev_id);
 946        if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
 947                printk(KERN_INFO "DMA secure error with device %d\n",
 948                       dma_chan[ch].dev_id);
 949        if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
 950                printk(KERN_INFO "DMA misaligned error with device %d\n",
 951                       dma_chan[ch].dev_id);
 952
 953        OMAP_DMA_CSR_REG(ch) = OMAP2_DMA_CSR_CLEAR_MASK;
 954        omap_writel(1 << ch, OMAP_DMA4_IRQSTATUS_L0);
 955
 956        if (likely(dma_chan[ch].callback != NULL))
 957                dma_chan[ch].callback(ch, status, dma_chan[ch].data);
 958
 959        return 0;
 960}
 961
  962/* STATUS register count is from 1-32 while ours is 0-31 */
 963static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
 964{
 965        u32 val;
 966        int i;
 967
 968        val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
 969        if (val == 0) {
 970                if (printk_ratelimit())
 971                        printk(KERN_WARNING "Spurious DMA IRQ\n");
 972                return IRQ_HANDLED;
 973        }
 974        for (i = 0; i < OMAP_LOGICAL_DMA_CH_COUNT && val != 0; i++) {
 975                if (val & 1)
 976                        omap2_dma_handle_ch(i);
 977                val >>= 1;
 978        }
 979
 980        return IRQ_HANDLED;
 981}
 982
 983static struct irqaction omap24xx_dma_irq = {
 984        .name = "DMA",
 985        .handler = omap2_dma_irq_handler,
 986        .flags = IRQF_DISABLED
 987};
 988
 989#else
 990static struct irqaction omap24xx_dma_irq;
 991#endif
 992
 993/*----------------------------------------------------------------------------*/
 994
 995static struct lcd_dma_info {
 996        spinlock_t lock;
 997        int reserved;
 998        void (* callback)(u16 status, void *data);
 999        void *cb_data;
1000
1001        int active;
1002        unsigned long addr, size;
1003        int rotate, data_type, xres, yres;
1004        int vxres;
1005        int mirror;
1006        int xscale, yscale;
1007        int ext_ctrl;
1008        int src_port;
1009        int single_transfer;
1010} lcd_dma;
1011
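/*
 * LCD frame buffer DMA. A display driver typically reserves the channel
 * with omap_request_lcd_dma(), describes buffer B1 with the
 * omap_set_lcd_dma_b1*() calls below, commits the setup with
 * omap_setup_lcd_dma() and starts the transfer with omap_enable_lcd_dma().
 */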
1012void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
1013                         int data_type)
1014{
1015        lcd_dma.addr = addr;
1016        lcd_dma.data_type = data_type;
1017        lcd_dma.xres = fb_xres;
1018        lcd_dma.yres = fb_yres;
1019}
1020
1021void omap_set_lcd_dma_src_port(int port)
1022{
1023        lcd_dma.src_port = port;
1024}
1025
1026void omap_set_lcd_dma_ext_controller(int external)
1027{
1028        lcd_dma.ext_ctrl = external;
1029}
1030
1031void omap_set_lcd_dma_single_transfer(int single)
1032{
1033        lcd_dma.single_transfer = single;
1034}
1035
1036
1037void omap_set_lcd_dma_b1_rotation(int rotate)
1038{
1039        if (omap_dma_in_1510_mode()) {
1040                printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
1041                BUG();
1042                return;
1043        }
1044        lcd_dma.rotate = rotate;
1045}
1046
1047void omap_set_lcd_dma_b1_mirror(int mirror)
1048{
1049        if (omap_dma_in_1510_mode()) {
1050                printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
1051                BUG();
1052        }
1053        lcd_dma.mirror = mirror;
1054}
1055
1056void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
1057{
1058        if (omap_dma_in_1510_mode()) {
 1059                printk(KERN_ERR "DMA virtual resolution is not supported "
1060                                "in 1510 mode\n");
1061                BUG();
1062        }
1063        lcd_dma.vxres = vxres;
1064}
1065
1066void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
1067{
1068        if (omap_dma_in_1510_mode()) {
1069                printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
1070                BUG();
1071        }
1072        lcd_dma.xscale = xscale;
1073        lcd_dma.yscale = yscale;
1074}
1075
1076static void set_b1_regs(void)
1077{
1078        unsigned long top, bottom;
1079        int es;
1080        u16 w;
1081        unsigned long en, fn;
1082        long ei, fi;
1083        unsigned long vxres;
1084        unsigned int xscale, yscale;
1085
1086        switch (lcd_dma.data_type) {
1087        case OMAP_DMA_DATA_TYPE_S8:
1088                es = 1;
1089                break;
1090        case OMAP_DMA_DATA_TYPE_S16:
1091                es = 2;
1092                break;
1093        case OMAP_DMA_DATA_TYPE_S32:
1094                es = 4;
1095                break;
1096        default:
1097                BUG();
1098                return;
1099        }
1100
1101        vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
1102        xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
1103        yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
1104        BUG_ON(vxres < lcd_dma.xres);
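/*
 * PIXADDR() gives the byte address of pixel (x, y) in the (possibly
 * scaled) virtual frame buffer. PIXSTEP() converts the distance between
 * two consecutive accesses into the element/frame index programmed into
 * the DMA engine, i.e. the address gap minus the element size plus one.
 */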
1105#define PIXADDR(x,y) (lcd_dma.addr + ((y) * vxres * yscale + (x) * xscale) * es)
1106#define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
1107        switch (lcd_dma.rotate) {
1108        case 0:
1109                if (!lcd_dma.mirror) {
1110                        top = PIXADDR(0, 0);
1111                        bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1112                        /* 1510 DMA requires the bottom address to be 2 more
1113                         * than the actual last memory access location. */
1114                        if (omap_dma_in_1510_mode() &&
1115                            lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
1116                                bottom += 2;
1117                        ei = PIXSTEP(0, 0, 1, 0);
1118                        fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
1119                } else {
1120                        top = PIXADDR(lcd_dma.xres - 1, 0);
1121                        bottom = PIXADDR(0, lcd_dma.yres - 1);
1122                        ei = PIXSTEP(1, 0, 0, 0);
1123                        fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
1124                }
1125                en = lcd_dma.xres;
1126                fn = lcd_dma.yres;
1127                break;
1128        case 90:
1129                if (!lcd_dma.mirror) {
1130                        top = PIXADDR(0, lcd_dma.yres - 1);
1131                        bottom = PIXADDR(lcd_dma.xres - 1, 0);
1132                        ei = PIXSTEP(0, 1, 0, 0);
1133                        fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
1134                } else {
1135                        top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1136                        bottom = PIXADDR(0, 0);
1137                        ei = PIXSTEP(0, 1, 0, 0);
1138                        fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
1139                }
1140                en = lcd_dma.yres;
1141                fn = lcd_dma.xres;
1142                break;
1143        case 180:
1144                if (!lcd_dma.mirror) {
1145                        top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1146                        bottom = PIXADDR(0, 0);
1147                        ei = PIXSTEP(1, 0, 0, 0);
1148                        fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
1149                } else {
1150                        top = PIXADDR(0, lcd_dma.yres - 1);
1151                        bottom = PIXADDR(lcd_dma.xres - 1, 0);
1152                        ei = PIXSTEP(0, 0, 1, 0);
1153                        fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
1154                }
1155                en = lcd_dma.xres;
1156                fn = lcd_dma.yres;
1157                break;
1158        case 270:
1159                if (!lcd_dma.mirror) {
1160                        top = PIXADDR(lcd_dma.xres - 1, 0);
1161                        bottom = PIXADDR(0, lcd_dma.yres - 1);
1162                        ei = PIXSTEP(0, 0, 0, 1);
1163                        fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
1164                } else {
1165                        top = PIXADDR(0, 0);
1166                        bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
1167                        ei = PIXSTEP(0, 0, 0, 1);
1168                        fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
1169                }
1170                en = lcd_dma.yres;
1171                fn = lcd_dma.xres;
1172                break;
1173        default:
1174                BUG();
1175                return; /* Suppress warning about uninitialized vars */
1176        }
1177
1178        if (omap_dma_in_1510_mode()) {
1179                omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
1180                omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
1181                omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
1182                omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
1183
1184                return;
1185        }
1186
1187        /* 1610 regs */
1188        omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
1189        omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
1190        omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
1191        omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
1192
1193        omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
1194        omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
1195
1196        w = omap_readw(OMAP1610_DMA_LCD_CSDP);
1197        w &= ~0x03;
1198        w |= lcd_dma.data_type;
1199        omap_writew(w, OMAP1610_DMA_LCD_CSDP);
1200
1201        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
 1202        /* Always set the source port as SDRAM for now */
1203        w &= ~(0x03 << 6);
1204        if (lcd_dma.callback != NULL)
1205                w |= 1 << 1;            /* Block interrupt enable */
1206        else
1207                w &= ~(1 << 1);
1208        omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1209
1210        if (!(lcd_dma.rotate || lcd_dma.mirror ||
1211              lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
1212                return;
1213
1214        w = omap_readw(OMAP1610_DMA_LCD_CCR);
1215        /* Set the double-indexed addressing mode */
1216        w |= (0x03 << 12);
1217        omap_writew(w, OMAP1610_DMA_LCD_CCR);
1218
1219        omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
1220        omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
1221        omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
1222}
1223
1224static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
1225{
1226        u16 w;
1227
1228        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1229        if (unlikely(!(w & (1 << 3)))) {
1230                printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
1231                return IRQ_NONE;
1232        }
1233        /* Ack the IRQ */
1234        w |= (1 << 3);
1235        omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1236        lcd_dma.active = 0;
1237        if (lcd_dma.callback != NULL)
1238                lcd_dma.callback(w, lcd_dma.cb_data);
1239
1240        return IRQ_HANDLED;
1241}
1242
1243int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
1244                         void *data)
1245{
1246        spin_lock_irq(&lcd_dma.lock);
1247        if (lcd_dma.reserved) {
1248                spin_unlock_irq(&lcd_dma.lock);
1249                printk(KERN_ERR "LCD DMA channel already reserved\n");
1250                BUG();
1251                return -EBUSY;
1252        }
1253        lcd_dma.reserved = 1;
1254        spin_unlock_irq(&lcd_dma.lock);
1255        lcd_dma.callback = callback;
1256        lcd_dma.cb_data = data;
1257        lcd_dma.active = 0;
1258        lcd_dma.single_transfer = 0;
1259        lcd_dma.rotate = 0;
1260        lcd_dma.vxres = 0;
1261        lcd_dma.mirror = 0;
1262        lcd_dma.xscale = 0;
1263        lcd_dma.yscale = 0;
1264        lcd_dma.ext_ctrl = 0;
1265        lcd_dma.src_port = 0;
1266
1267        return 0;
1268}
1269
1270void omap_free_lcd_dma(void)
1271{
1272        spin_lock(&lcd_dma.lock);
1273        if (!lcd_dma.reserved) {
1274                spin_unlock(&lcd_dma.lock);
1275                printk(KERN_ERR "LCD DMA is not reserved\n");
1276                BUG();
1277                return;
1278        }
1279        if (!enable_1510_mode)
1280                omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
1281                            OMAP1610_DMA_LCD_CCR);
1282        lcd_dma.reserved = 0;
1283        spin_unlock(&lcd_dma.lock);
1284}
1285
1286void omap_enable_lcd_dma(void)
1287{
1288        u16 w;
1289
1290        /* Set the Enable bit only if an external controller is
1291         * connected. Otherwise the OMAP internal controller will
1292         * start the transfer when it gets enabled.
1293         */
1294        if (enable_1510_mode || !lcd_dma.ext_ctrl)
1295                return;
1296
1297        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1298        w |= 1 << 8;
1299        omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1300
1301        lcd_dma.active = 1;
1302
1303        w = omap_readw(OMAP1610_DMA_LCD_CCR);
1304        w |= 1 << 7;
1305        omap_writew(w, OMAP1610_DMA_LCD_CCR);
1306}
1307
1308void omap_setup_lcd_dma(void)
1309{
1310        BUG_ON(lcd_dma.active);
1311        if (!enable_1510_mode) {
1312                /* Set some reasonable defaults */
1313                omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
1314                omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
1315                omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
1316        }
1317        set_b1_regs();
1318        if (!enable_1510_mode) {
1319                u16 w;
1320
1321                w = omap_readw(OMAP1610_DMA_LCD_CCR);
1322                /* If DMA was already active set the end_prog bit to have
1323                 * the programmed register set loaded into the active
1324                 * register set.
1325                 */
1326                w |= 1 << 11;           /* End_prog */
1327                if (!lcd_dma.single_transfer)
1328                        w |= (3 << 8);  /* Auto_init, repeat */
1329                omap_writew(w, OMAP1610_DMA_LCD_CCR);
1330        }
1331}
1332
1333void omap_stop_lcd_dma(void)
1334{
1335        u16 w;
1336
1337        lcd_dma.active = 0;
1338        if (enable_1510_mode || !lcd_dma.ext_ctrl)
1339                return;
1340
1341        w = omap_readw(OMAP1610_DMA_LCD_CCR);
1342        w &= ~(1 << 7);
1343        omap_writew(w, OMAP1610_DMA_LCD_CCR);
1344
1345        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1346        w &= ~(1 << 8);
1347        omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1348}
1349
1350/*----------------------------------------------------------------------------*/
1351
1352static int __init omap_init_dma(void)
1353{
1354        int ch, r;
1355
1356        if (cpu_is_omap15xx()) {
1357                printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
1358                dma_chan_count = 9;
1359                enable_1510_mode = 1;
1360        } else if (cpu_is_omap16xx() || cpu_is_omap730()) {
1361                printk(KERN_INFO "OMAP DMA hardware version %d\n",
1362                       omap_readw(OMAP_DMA_HW_ID));
1363                printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
1364                       (omap_readw(OMAP_DMA_CAPS_0_U) << 16) |
1365                       omap_readw(OMAP_DMA_CAPS_0_L),
1366                       (omap_readw(OMAP_DMA_CAPS_1_U) << 16) |
1367                       omap_readw(OMAP_DMA_CAPS_1_L),
1368                       omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
1369                       omap_readw(OMAP_DMA_CAPS_4));
1370                if (!enable_1510_mode) {
1371                        u16 w;
1372
1373                        /* Disable OMAP 3.0/3.1 compatibility mode. */
1374                        w = omap_readw(OMAP_DMA_GSCR);
1375                        w |= 1 << 3;
1376                        omap_writew(w, OMAP_DMA_GSCR);
1377                        dma_chan_count = 16;
1378                } else
1379                        dma_chan_count = 9;
1380                if (cpu_is_omap16xx()) {
1381                        u16 w;
1382
 1383                        /* Leaving this bit set would prevent OMAP sleep */
1384                        w = omap_readw(OMAP1610_DMA_LCD_CTRL);
1385                        w &= ~(1 << 8);
1386                        omap_writew(w, OMAP1610_DMA_LCD_CTRL);
1387                }
1388        } else if (cpu_is_omap24xx()) {
1389                u8 revision = omap_readb(OMAP_DMA4_REVISION);
1390                printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
1391                       revision >> 4, revision & 0xf);
1392                dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
1393        } else {
1394                dma_chan_count = 0;
1395                return 0;
1396        }
1397
1398        memset(&lcd_dma, 0, sizeof(lcd_dma));
1399        spin_lock_init(&lcd_dma.lock);
1400        spin_lock_init(&dma_chan_lock);
1401        memset(&dma_chan, 0, sizeof(dma_chan));
1402
1403        for (ch = 0; ch < dma_chan_count; ch++) {
1404                omap_clear_dma(ch);
1405                dma_chan[ch].dev_id = -1;
1406                dma_chan[ch].next_lch = -1;
1407
1408                if (ch >= 6 && enable_1510_mode)
1409                        continue;
1410
1411                if (cpu_class_is_omap1()) {
 1412                        /* request_irq() doesn't like dev_id (i.e. ch) being
1413                         * zero, so we have to kludge around this. */
1414                        r = request_irq(omap1_dma_irq[ch],
1415                                        omap1_dma_irq_handler, 0, "DMA",
1416                                        (void *) (ch + 1));
1417                        if (r != 0) {
1418                                int i;
1419
1420                                printk(KERN_ERR "unable to request IRQ %d "
1421                                       "for DMA (error %d)\n",
1422                                       omap1_dma_irq[ch], r);
1423                                for (i = 0; i < ch; i++)
1424                                        free_irq(omap1_dma_irq[i],
1425                                                 (void *) (i + 1));
1426                                return r;
1427                        }
1428                }
1429        }
1430
1431        if (cpu_is_omap24xx())
1432                setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
1433
1434        /* FIXME: Update LCD DMA to work on 24xx */
1435        if (cpu_class_is_omap1()) {
1436                r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
1437                                "LCD DMA", NULL);
1438                if (r != 0) {
1439                        int i;
1440
1441                        printk(KERN_ERR "unable to request IRQ for LCD DMA "
1442                               "(error %d)\n", r);
1443                        for (i = 0; i < dma_chan_count; i++)
1444                                free_irq(omap1_dma_irq[i], (void *) (i + 1));
1445                        return r;
1446                }
1447        }
1448
1449        return 0;
1450}
1451
1452arch_initcall(omap_init_dma);
1453
1454EXPORT_SYMBOL(omap_get_dma_src_pos);
1455EXPORT_SYMBOL(omap_get_dma_dst_pos);
1456EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
1457EXPORT_SYMBOL(omap_clear_dma);
1458EXPORT_SYMBOL(omap_set_dma_priority);
1459EXPORT_SYMBOL(omap_request_dma);
1460EXPORT_SYMBOL(omap_free_dma);
1461EXPORT_SYMBOL(omap_start_dma);
1462EXPORT_SYMBOL(omap_stop_dma);
1463EXPORT_SYMBOL(omap_set_dma_callback);
1464EXPORT_SYMBOL(omap_enable_dma_irq);
1465EXPORT_SYMBOL(omap_disable_dma_irq);
1466
1467EXPORT_SYMBOL(omap_set_dma_transfer_params);
1468EXPORT_SYMBOL(omap_set_dma_color_mode);
1469EXPORT_SYMBOL(omap_set_dma_write_mode);
1470
1471EXPORT_SYMBOL(omap_set_dma_src_params);
1472EXPORT_SYMBOL(omap_set_dma_src_index);
1473EXPORT_SYMBOL(omap_set_dma_src_data_pack);
1474EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
1475
1476EXPORT_SYMBOL(omap_set_dma_dest_params);
1477EXPORT_SYMBOL(omap_set_dma_dest_index);
1478EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
1479EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
1480
1481EXPORT_SYMBOL(omap_set_dma_params);
1482
1483EXPORT_SYMBOL(omap_dma_link_lch);
1484EXPORT_SYMBOL(omap_dma_unlink_lch);
1485
1486EXPORT_SYMBOL(omap_request_lcd_dma);
1487EXPORT_SYMBOL(omap_free_lcd_dma);
1488EXPORT_SYMBOL(omap_enable_lcd_dma);
1489EXPORT_SYMBOL(omap_setup_lcd_dma);
1490EXPORT_SYMBOL(omap_stop_lcd_dma);
1491EXPORT_SYMBOL(omap_set_lcd_dma_b1);
1492EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
1493EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
1494EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
1495EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
1496EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
1497EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
1498
1499