linux/arch/arm/mach-davinci/dma.c
<<
>>
Prefs
   1/*
   2 * EDMA3 support for DaVinci
   3 *
   4 * Copyright (C) 2006-2009 Texas Instruments.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/init.h>
  23#include <linux/module.h>
  24#include <linux/interrupt.h>
  25#include <linux/platform_device.h>
  26#include <linux/spinlock.h>
  27#include <linux/compiler.h>
  28#include <linux/io.h>
  29
  30#include <mach/cputype.h>
  31#include <mach/memory.h>
  32#include <mach/hardware.h>
  33#include <mach/irqs.h>
  34#include <mach/edma.h>
  35#include <mach/mux.h>
  36
  37
  38/* Offsets matching "struct edmacc_param" */
  39#define PARM_OPT                0x00
  40#define PARM_SRC                0x04
  41#define PARM_A_B_CNT            0x08
  42#define PARM_DST                0x0c
  43#define PARM_SRC_DST_BIDX       0x10
  44#define PARM_LINK_BCNTRLD       0x14
  45#define PARM_SRC_DST_CIDX       0x18
  46#define PARM_CCNT               0x1c
  47
  48#define PARM_SIZE               0x20
  49
  50/* Offsets for EDMA CC global channel registers and their shadows */
  51#define SH_ER           0x00    /* 64 bits */
  52#define SH_ECR          0x08    /* 64 bits */
  53#define SH_ESR          0x10    /* 64 bits */
  54#define SH_CER          0x18    /* 64 bits */
  55#define SH_EER          0x20    /* 64 bits */
  56#define SH_EECR         0x28    /* 64 bits */
  57#define SH_EESR         0x30    /* 64 bits */
  58#define SH_SER          0x38    /* 64 bits */
  59#define SH_SECR         0x40    /* 64 bits */
  60#define SH_IER          0x50    /* 64 bits */
  61#define SH_IECR         0x58    /* 64 bits */
  62#define SH_IESR         0x60    /* 64 bits */
  63#define SH_IPR          0x68    /* 64 bits */
  64#define SH_ICR          0x70    /* 64 bits */
  65#define SH_IEVAL        0x78
  66#define SH_QER          0x80
  67#define SH_QEER         0x84
  68#define SH_QEECR        0x88
  69#define SH_QEESR        0x8c
  70#define SH_QSER         0x90
  71#define SH_QSECR        0x94
  72#define SH_SIZE         0x200
  73
  74/* Offsets for EDMA CC global registers */
  75#define EDMA_REV        0x0000
  76#define EDMA_CCCFG      0x0004
  77#define EDMA_QCHMAP     0x0200  /* 8 registers */
  78#define EDMA_DMAQNUM    0x0240  /* 8 registers (4 on OMAP-L1xx) */
  79#define EDMA_QDMAQNUM   0x0260
  80#define EDMA_QUETCMAP   0x0280
  81#define EDMA_QUEPRI     0x0284
  82#define EDMA_EMR        0x0300  /* 64 bits */
  83#define EDMA_EMCR       0x0308  /* 64 bits */
  84#define EDMA_QEMR       0x0310
  85#define EDMA_QEMCR      0x0314
  86#define EDMA_CCERR      0x0318
  87#define EDMA_CCERRCLR   0x031c
  88#define EDMA_EEVAL      0x0320
  89#define EDMA_DRAE       0x0340  /* 4 x 64 bits*/
  90#define EDMA_QRAE       0x0380  /* 4 registers */
  91#define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
  92#define EDMA_QSTAT      0x0600  /* 2 registers */
  93#define EDMA_QWMTHRA    0x0620
  94#define EDMA_QWMTHRB    0x0624
  95#define EDMA_CCSTAT     0x0640
  96
  97#define EDMA_M          0x1000  /* global channel registers */
  98#define EDMA_ECR        0x1008
  99#define EDMA_ECRH       0x100C
 100#define EDMA_SHADOW0    0x2000  /* 4 regions shadowing global channels */
 101#define EDMA_PARM       0x4000  /* 128 param entries */
 102
 103#define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
 104
 105#define EDMA_DCHMAP     0x0100  /* 64 registers */
 106#define CHMAP_EXIST     BIT(24)
 107
 108#define EDMA_MAX_DMACH           64
 109#define EDMA_MAX_PARAMENTRY     512
 110#define EDMA_MAX_CC               2
 111
 112
 113/*****************************************************************************/
 114
 115static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
 116
/* Read a 32-bit CC register at @offset on controller @ctlr */
static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}
 121
/* Write @val to the 32-bit CC register at @offset on controller @ctlr */
static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
 126static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
 127                unsigned or)
 128{
 129        unsigned val = edma_read(ctlr, offset);
 130        val &= and;
 131        val |= or;
 132        edma_write(ctlr, offset, val);
 133}
 134static inline void edma_and(unsigned ctlr, int offset, unsigned and)
 135{
 136        unsigned val = edma_read(ctlr, offset);
 137        val &= and;
 138        edma_write(ctlr, offset, val);
 139}
 140static inline void edma_or(unsigned ctlr, int offset, unsigned or)
 141{
 142        unsigned val = edma_read(ctlr, offset);
 143        val |= or;
 144        edma_write(ctlr, offset, val);
 145}
/* Read element @i of an array of 32-bit registers starting at @offset */
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
/* Write element @i of an array of 32-bit registers starting at @offset */
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
/* Read-modify-write element @i of a register array (see edma_modify) */
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
/* OR bits into element @i of a register array */
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
/* OR bits into a 2-D register array indexed [i][j], 2 words per row */
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
/* Write a 2-D register array indexed [i][j], 2 words per row */
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
/* Read a register in shadow region 0 (the region the ARM uses) */
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
/* Read element @i of a register array in shadow region 0 */
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
/* Write a register in shadow region 0 */
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
/* Write element @i of a register array in shadow region 0 */
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
/* Read field @offset of PaRAM set @param_no (each set is 32 bytes) */
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
/* Write field @offset of PaRAM set @param_no */
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
/* Read-modify-write field @offset of PaRAM set @param_no */
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
/* Clear bits in field @offset of PaRAM set @param_no */
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
/* Set bits in field @offset of PaRAM set @param_no */
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
 217
 218/*****************************************************************************/
 219
/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;	/* queue used for EVENTQ_DEFAULT */

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_noevent bit for each channel is clear unless
	 * it doesn't trigger DMA events on this platform.  It uses a
	 * bit of SOC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);

	/* IRQ number range owned by this CC; used by irq2ctlr() */
	unsigned	irq_res_start;
	unsigned	irq_res_end;

	/* per-channel completion/error callback and its cookie */
	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};
 253
 254static struct edma *edma_info[EDMA_MAX_CC];
 255
/* dummy param set used to (re)initialize parameter RAM slots: a
 * null link (0xffff) with ccnt = 1 describes a harmless no-op transfer
 */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};
 261
 262/*****************************************************************************/
 263
/*
 * Route DMA channel @ch_no to event queue @queue_no by programming its
 * 4-bit field in the appropriate DMAQNUM register (8 channels per word).
 */
static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_info[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}
 277
/* Map event queue @queue_no to transfer controller @tc_no (QUETCMAP) */
static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}
 283
/* Set the system-bus priority of event queue @queue_no (QUEPRI) */
static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}
 291
 292/**
 293 * map_dmach_param - Maps channel number to param entry number
 294 *
 * This maps the DMA channel number to a param entry number. In
 296 * other words using the DMA channel mapping registers a param entry
 297 * can be mapped to any channel
 298 *
 299 * Callers are responsible for ensuring the channel mapping logic is
 300 * included in that particular EDMA variant (Eg : dm646x)
 301 *
 302 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
	/* identity map: channel i -> PaRAM set i (PAENTRY is bits 13:5) */
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
}
 309
/*
 * Install or remove the completion-interrupt callback for channel @lch
 * (packed controller+channel).  With no callback, the channel's interrupt
 * is disabled (IECR) *before* the handler pointer is cleared; with a
 * callback, any stale pending bit is cleared (ICR) and the interrupt
 * enabled (IESR) only *after* the handler is in place, so the ISR never
 * sees an enabled channel without a valid callback.
 */
static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback) {
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				(1 << (lch & 0x1f)));
	}

	edma_info[ctlr]->intr_data[lch].callback = callback;
	edma_info[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				(1 << (lch & 0x1f)));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				(1 << (lch & 0x1f)));
	}
}
 335
/*
 * Map an IRQ number back to the controller (0 or 1) whose resource
 * range contains it; -1 if no match.
 *
 * NOTE(review): callers store the result in an unsigned and would index
 * edma_info[] out of bounds on -1; also edma_info[1] is dereferenced
 * even on single-CC systems — verify both cannot happen in practice.
 */
static int irq2ctlr(int irq)
{
	if (irq >= edma_info[0]->irq_res_start &&
		irq <= edma_info[0]->irq_res_end)
		return 0;
	else if (irq >= edma_info[1]->irq_res_start &&
		irq <= edma_info[1]->irq_res_end)
		return 1;

	return -1;
}
 347
 348/******************************************************************************
 349 *
 350 * DMA interrupt handler
 351 *
 352 *****************************************************************************/
/*
 * Completion interrupt handler: scans both 32-bit halves of the shadow
 * IPR register, acks each pending channel (ICR) and invokes its
 * registered callback with DMA_COMPLETE.  Loops while new completions
 * keep arriving, bounded to ~10 passes to avoid starving the CPU, then
 * writes IEVAL so any interrupts raised after the last scan re-assert.
 */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_irq_handler\n");

	/* spurious interrupt: nothing pending in either IPR word */
	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
	    && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;
		/* pick whichever IPR word still has pending bits */
		if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
			j = 0;
		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;	/* absolute channel number */
			if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
							(1 << i)) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(ctlr, SH_ICR, j,
							(1 << i));
				if (edma_info[ctlr]->intr_data[k].callback) {
					edma_info[ctlr]->intr_data[k].callback(
						k, DMA_COMPLETE,
						edma_info[ctlr]->intr_data[k].
						data);
				}
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
 399
 400/******************************************************************************
 401 *
 402 * DMA error interrupt handler
 403 *
 404 *****************************************************************************/
/*
 * Channel-controller error interrupt handler.  Three error sources are
 * polled in priority order: per-channel event-miss (EMR0/EMR1, reported
 * to the channel's callback as DMA_CC_ERROR), QDMA event-miss (QEMR,
 * acked but not reported) and CC internal errors (CCERR, likewise only
 * acked).  As with dma_irq_handler, the scan loop is bounded to ~10
 * passes, and EEVAL is written at the end to re-trigger anything that
 * became pending after the final scan.
 */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_ccerr_handler\n");

	/* spurious interrupt: no error source is pending */
	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;	/* channel number */
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							(1 << i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							1 << i);
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, (1 << i));
					if (edma_info[ctlr]->intr_data[k].
								callback) {
						edma_info[ctlr]->intr_data[k].
						callback(k,
						DMA_CC_ERROR,
						edma_info[ctlr]->intr_data
						[k].data);
					}
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, 1 << i);
					edma_shadow0_write(ctlr, SH_QSECR,
								(1 << i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, 1 << i);

					/* NOTE:  not reported!! */
				}
			}
		}
		/* done once every error source reads back clear */
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
		    && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
		    && (edma_read(ctlr, EDMA_QEMR) == 0)
		    && (edma_read(ctlr, EDMA_CCERR) == 0)) {
			break;
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
 491
 492/******************************************************************************
 493 *
 494 * Transfer controller error interrupt handlers
 495 *
 496 *****************************************************************************/
 497
 498#define tc_errs_handled false   /* disabled as long as they're NOPs */
 499
/* TC0 error handler: placeholder, only logs (see tc_errs_handled) */
static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}
 505
/* TC1 error handler: placeholder, only logs (see tc_errs_handled) */
static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}
 511
 512static int reserve_contiguous_params(int ctlr, unsigned int id,
 513                                     unsigned int num_params,
 514                                     unsigned int start_param)
 515{
 516        int i, j;
 517        unsigned int count = num_params;
 518
 519        for (i = start_param; i < edma_info[ctlr]->num_slots; ++i) {
 520                j = EDMA_CHAN_SLOT(i);
 521                if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse))
 522                        count--;
 523                        if (count == 0)
 524                                break;
 525                else if (id == EDMA_CONT_PARAMS_FIXED_EXACT)
 526                        break;
 527                else
 528                        count = num_params;
 529        }
 530
 531        /*
 532         * We have to clear any bits that we set
 533         * if we run out parameter RAMs, i.e we do find a set
 534         * of contiguous parameter RAMs but do not find the exact number
 535         * requested as we may reach the total number of parameter RAMs
 536         */
 537        if (count) {
 538                for (j = i - num_params + count + 1; j <= i ; ++j)
 539                        clear_bit(j, edma_info[ctlr]->edma_inuse);
 540
 541                return -EBUSY;
 542        }
 543
 544        for (j = i - num_params + 1; j <= i; ++j)
 545                memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
 546                        &dummy_paramset, PARM_SIZE);
 547
 548        return EDMA_CTLR_CHAN(ctlr, i - num_params + 1);
 549}
 550
 551/*-----------------------------------------------------------------------*/
 552
 553/* Resource alloc/free:  dma channels, parameter RAM slots */
 554
 555/**
 556 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 557 * @channel: specific channel to allocate; negative for "any unmapped channel"
 558 * @callback: optional; to be issued on DMA completion or errors
 559 * @data: passed to callback
 560 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 561 *      Controller (TC) executes requests using this channel.  Use
 562 *      EVENTQ_DEFAULT unless you really need a high priority queue.
 563 *
 564 * This allocates a DMA channel and its associated parameter RAM slot.
 565 * The parameter RAM is initialized to hold a dummy transfer.
 566 *
 567 * Normal use is to pass a specific channel number as @channel, to make
 568 * use of hardware events mapped to that channel.  When the channel will
 569 * be used only for software triggering or event chaining, channels not
 570 * mapped to hardware events (or mapped to unused events) are preferable.
 571 *
 572 * DMA transfers start from a channel using edma_start(), or by
 573 * chaining.  When the transfer described in that channel's parameter RAM
 574 * slot completes, that slot's data may be reloaded through a link.
 575 *
 576 * DMA errors are only reported to the @callback associated with the
 577 * channel driving that transfer, but transfer completion callbacks can
 578 * be sent to another channel under control of the TCC field in
 579 * the option word of the transfer's parameter RAM set.  Drivers must not
 580 * use DMA transfer completion callbacks for channels they did not allocate.
 581 * (The same applies to TCC codes used in transfer chaining.)
 582 *
 583 * Returns the number of the channel, else negative errno.
 584 */
 585int edma_alloc_channel(int channel,
 586                void (*callback)(unsigned channel, u16 ch_status, void *data),
 587                void *data,
 588                enum dma_event_q eventq_no)
 589{
 590        unsigned i, done, ctlr = 0;
 591
 592        if (channel >= 0) {
 593                ctlr = EDMA_CTLR(channel);
 594                channel = EDMA_CHAN_SLOT(channel);
 595        }
 596
 597        if (channel < 0) {
 598                for (i = 0; i < EDMA_MAX_CC; i++) {
 599                        channel = 0;
 600                        for (;;) {
 601                                channel = find_next_bit(edma_info[i]->
 602                                                edma_noevent,
 603                                                edma_info[i]->num_channels,
 604                                                channel);
 605                                if (channel == edma_info[i]->num_channels)
 606                                        return -ENOMEM;
 607                                if (!test_and_set_bit(channel,
 608                                                edma_info[i]->edma_inuse)) {
 609                                        done = 1;
 610                                        ctlr = i;
 611                                        break;
 612                                }
 613                                channel++;
 614                        }
 615                        if (done)
 616                                break;
 617                }
 618        } else if (channel >= edma_info[ctlr]->num_channels) {
 619                return -EINVAL;
 620        } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
 621                return -EBUSY;
 622        }
 623
 624        /* ensure access through shadow region 0 */
 625        edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));
 626
 627        /* ensure no events are pending */
 628        edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
 629        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
 630                        &dummy_paramset, PARM_SIZE);
 631
 632        if (callback)
 633                setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
 634                                        callback, data);
 635
 636        map_dmach_queue(ctlr, channel, eventq_no);
 637
 638        return channel;
 639}
 640EXPORT_SYMBOL(edma_alloc_channel);
 641
 642
 643/**
 644 * edma_free_channel - deallocate DMA channel
 645 * @channel: dma channel returned from edma_alloc_channel()
 646 *
 647 * This deallocates the DMA channel and associated parameter RAM slot
 648 * allocated by edma_alloc_channel().
 649 *
 650 * Callers are responsible for ensuring the channel is inactive, and
 651 * will not be reactivated by linking, chaining, or software calls to
 652 * edma_start().
 653 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_info[ctlr]->num_channels)
		return;

	/* NOTE(review): "channel" has already been stripped of its
	 * controller bits, so setup_dma_interrupt() will recompute
	 * ctlr as 0 — verify this is correct for a second CC.
	 */
	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	/* reload the dummy set so a stale PaRAM cannot fire later */
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_info[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
 672
 673/**
 674 * edma_alloc_slot - allocate DMA parameter RAM
 675 * @slot: specific slot to allocate; negative for "any unused slot"
 676 *
 677 * This allocates a parameter RAM slot, initializing it to hold a
 678 * dummy transfer.  Slots allocated using this routine have not been
 679 * mapped to a hardware DMA channel, and will normally be used by
 680 * linking to them from a slot associated with a DMA channel.
 681 *
 682 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 683 * slots may be allocated on behalf of DSP firmware.
 684 *
 685 * Returns the number of the slot, else negative errno.
 686 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		/* "any slot": search only past the channel-paired slots */
		slot = edma_info[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
					edma_info[ctlr]->num_slots, slot);
			if (slot == edma_info[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot,
						edma_info[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_info[ctlr]->num_channels ||
			slot >= edma_info[ctlr]->num_slots) {
		/* explicit slots must not collide with channel slots */
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* start out holding a harmless dummy transfer */
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
 716
 717/**
 718 * edma_free_slot - deallocate DMA parameter RAM
 719 * @slot: parameter RAM slot returned from edma_alloc_slot()
 720 *
 721 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 722 * Callers are responsible for ensuring the slot is inactive, and will
 723 * not be activated.
 724 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	/* refuse channel-paired slots and out-of-range values */
	if (slot < edma_info[ctlr]->num_channels ||
		slot >= edma_info[ctlr]->num_slots)
		return;

	/* reload the dummy set so a stale PaRAM cannot fire later */
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_info[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
 741
 742
 743/**
 744 * edma_alloc_cont_slots- alloc contiguous parameter RAM slots
 745 * The API will return the starting point of a set of
 746 * contiguous PARAM's that have been requested
 747 *
 748 * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
 749 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 750 * @count: number of contiguous Paramter RAM's
 751 * @param  - the start value of Parameter RAM that should be passed if id
 752 * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 753 *
 754 * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 755 * contiguous Parameter RAMs from parameter RAM 64 in the case of DaVinci SOCs
 756 * and 32 in the case of Primus
 757 *
 758 * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 759 * set of contiguous parameter RAMs from the "param" that is passed as an
 760 * argument to the API.
 761 *
 762 * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
 763 * starts looking for a set of contiguous parameter RAMs from the "param"
 764 * that is passed as an argument to the API. On failure the API will try to
 765 * find a set of contiguous Parameter RAMs in the remaining Parameter RAMs
 766 */
 767int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
 768{
 769        /*
 770         * The start slot requested should be greater than
 771         * the number of channels and lesser than the total number
 772         * of slots
 773         */
 774        if (slot < edma_info[ctlr]->num_channels ||
 775                slot >= edma_info[ctlr]->num_slots)
 776                return -EINVAL;
 777
 778        /*
 779         * The number of parameter RAMs requested cannot be less than 1
 780         * and cannot be more than the number of slots minus the number of
 781         * channels
 782         */
 783        if (count < 1 || count >
 784                (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
 785                return -EINVAL;
 786
 787        switch (id) {
 788        case EDMA_CONT_PARAMS_ANY:
 789                return reserve_contiguous_params(ctlr, id, count,
 790                                                 edma_info[ctlr]->num_channels);
 791        case EDMA_CONT_PARAMS_FIXED_EXACT:
 792        case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
 793                return reserve_contiguous_params(ctlr, id, count, slot);
 794        default:
 795                return -EINVAL;
 796        }
 797
 798}
 799EXPORT_SYMBOL(edma_alloc_cont_slots);
 800
 801/**
 802 * edma_free_cont_slots - deallocate DMA parameter RAMs
 803 * @slot: first parameter RAM of a set of parameter RAMs to be freed
 804 * @count: the number of contiguous parameter RAMs to be freed
 805 *
 806 * This deallocates the parameter RAM slots allocated by
 807 * edma_alloc_cont_slots.
 808 * Callers/applications need to keep track of sets of contiguous
 809 * parameter RAMs that have been allocated using the edma_alloc_cont_slots
 810 * API.
 811 * Callers are responsible for ensuring the slots are inactive, and will
 812 * not be activated.
 813 */
 814int edma_free_cont_slots(unsigned slot, int count)
 815{
 816        unsigned ctlr;
 817        int i;
 818
 819        ctlr = EDMA_CTLR(slot);
 820        slot = EDMA_CHAN_SLOT(slot);
 821
 822        if (slot < edma_info[ctlr]->num_channels ||
 823                slot >= edma_info[ctlr]->num_slots ||
 824                count < 1)
 825                return -EINVAL;
 826
 827        for (i = slot; i < slot + count; ++i) {
 828                ctlr = EDMA_CTLR(i);
 829                slot = EDMA_CHAN_SLOT(i);
 830
 831                memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
 832                        &dummy_paramset, PARM_SIZE);
 833                clear_bit(slot, edma_info[ctlr]->edma_inuse);
 834        }
 835
 836        return 0;
 837}
 838EXPORT_SYMBOL(edma_free_cont_slots);
 839
 840/*-----------------------------------------------------------------------*/
 841
 842/* Parameter RAM operations (i) -- read/write partial slots */
 843
 844/**
 845 * edma_set_src - set initial DMA source address in parameter RAM slot
 846 * @slot: parameter RAM slot being configured
 847 * @src_port: physical address of source (memory, controller FIFO, etc)
 848 * @addressMode: INCR, except in very rare cases
 849 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
 850 *      width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 851 *
 852 * Note that the source address is modified during the DMA transfer
 853 * according to edma_set_src_index().
 854 */
 855void edma_set_src(unsigned slot, dma_addr_t src_port,
 856                                enum address_mode mode, enum fifo_width width)
 857{
 858        unsigned ctlr;
 859
 860        ctlr = EDMA_CTLR(slot);
 861        slot = EDMA_CHAN_SLOT(slot);
 862
 863        if (slot < edma_info[ctlr]->num_slots) {
 864                unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
 865
 866                if (mode) {
 867                        /* set SAM and program FWID */
 868                        i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
 869                } else {
 870                        /* clear SAM */
 871                        i &= ~SAM;
 872                }
 873                edma_parm_write(ctlr, PARM_OPT, slot, i);
 874
 875                /* set the source port address
 876                   in source register of param structure */
 877                edma_parm_write(ctlr, PARM_SRC, slot, src_port);
 878        }
 879}
 880EXPORT_SYMBOL(edma_set_src);
 881
 882/**
 883 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 884 * @slot: parameter RAM slot being configured
 885 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 886 * @addressMode: INCR, except in very rare cases
 887 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
 888 *      width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 889 *
 890 * Note that the destination address is modified during the DMA transfer
 891 * according to edma_set_dest_index().
 892 */
 893void edma_set_dest(unsigned slot, dma_addr_t dest_port,
 894                                 enum address_mode mode, enum fifo_width width)
 895{
 896        unsigned ctlr;
 897
 898        ctlr = EDMA_CTLR(slot);
 899        slot = EDMA_CHAN_SLOT(slot);
 900
 901        if (slot < edma_info[ctlr]->num_slots) {
 902                unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
 903
 904                if (mode) {
 905                        /* set DAM and program FWID */
 906                        i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
 907                } else {
 908                        /* clear DAM */
 909                        i &= ~DAM;
 910                }
 911                edma_parm_write(ctlr, PARM_OPT, slot, i);
 912                /* set the destination port address
 913                   in dest register of param structure */
 914                edma_parm_write(ctlr, PARM_DST, slot, dest_port);
 915        }
 916}
 917EXPORT_SYMBOL(edma_set_dest);
 918
 919/**
 920 * edma_get_position - returns the current transfer points
 921 * @slot: parameter RAM slot being examined
 922 * @src: pointer to source port position
 923 * @dst: pointer to destination port position
 924 *
 925 * Returns current source and destination addresses for a particular
 926 * parameter RAM slot.  Its channel should not be active when this is called.
 927 */
 928void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
 929{
 930        struct edmacc_param temp;
 931        unsigned ctlr;
 932
 933        ctlr = EDMA_CTLR(slot);
 934        slot = EDMA_CHAN_SLOT(slot);
 935
 936        edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
 937        if (src != NULL)
 938                *src = temp.src;
 939        if (dst != NULL)
 940                *dst = temp.dst;
 941}
 942EXPORT_SYMBOL(edma_get_position);
 943
 944/**
 945 * edma_set_src_index - configure DMA source address indexing
 946 * @slot: parameter RAM slot being configured
 947 * @src_bidx: byte offset between source arrays in a frame
 948 * @src_cidx: byte offset between source frames in a block
 949 *
 950 * Offsets are specified to support either contiguous or discontiguous
 951 * memory transfers, or repeated access to a hardware register, as needed.
 952 * When accessing hardware registers, both offsets are normally zero.
 953 */
 954void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
 955{
 956        unsigned ctlr;
 957
 958        ctlr = EDMA_CTLR(slot);
 959        slot = EDMA_CHAN_SLOT(slot);
 960
 961        if (slot < edma_info[ctlr]->num_slots) {
 962                edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
 963                                0xffff0000, src_bidx);
 964                edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
 965                                0xffff0000, src_cidx);
 966        }
 967}
 968EXPORT_SYMBOL(edma_set_src_index);
 969
 970/**
 971 * edma_set_dest_index - configure DMA destination address indexing
 972 * @slot: parameter RAM slot being configured
 973 * @dest_bidx: byte offset between destination arrays in a frame
 974 * @dest_cidx: byte offset between destination frames in a block
 975 *
 976 * Offsets are specified to support either contiguous or discontiguous
 977 * memory transfers, or repeated access to a hardware register, as needed.
 978 * When accessing hardware registers, both offsets are normally zero.
 979 */
 980void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
 981{
 982        unsigned ctlr;
 983
 984        ctlr = EDMA_CTLR(slot);
 985        slot = EDMA_CHAN_SLOT(slot);
 986
 987        if (slot < edma_info[ctlr]->num_slots) {
 988                edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
 989                                0x0000ffff, dest_bidx << 16);
 990                edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
 991                                0x0000ffff, dest_cidx << 16);
 992        }
 993}
 994EXPORT_SYMBOL(edma_set_dest_index);
 995
 996/**
 997 * edma_set_transfer_params - configure DMA transfer parameters
 998 * @slot: parameter RAM slot being configured
 999 * @acnt: how many bytes per array (at least one)
1000 * @bcnt: how many arrays per frame (at least one)
1001 * @ccnt: how many frames per block (at least one)
1002 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
1003 *      the value to reload into bcnt when it decrements to zero
1004 * @sync_mode: ASYNC or ABSYNC
1005 *
1006 * See the EDMA3 documentation to understand how to configure and link
1007 * transfers using the fields in PaRAM slots.  If you are not doing it
1008 * all at once with edma_write_slot(), you will use this routine
1009 * plus two calls each for source and destination, setting the initial
1010 * address and saying how to index that address.
1011 *
1012 * An example of an A-Synchronized transfer is a serial link using a
1013 * single word shift register.  In that case, @acnt would be equal to
1014 * that word size; the serial controller issues a DMA synchronization
1015 * event to transfer each word, and memory access by the DMA transfer
1016 * controller will be word-at-a-time.
1017 *
1018 * An example of an AB-Synchronized transfer is a device using a FIFO.
1019 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
1020 * The controller with the FIFO issues DMA synchronization events when
1021 * the FIFO threshold is reached, and the DMA transfer controller will
1022 * transfer one frame to (or from) the FIFO.  It will probably use
1023 * efficient burst modes to access memory.
1024 */
1025void edma_set_transfer_params(unsigned slot,
1026                u16 acnt, u16 bcnt, u16 ccnt,
1027                u16 bcnt_rld, enum sync_dimension sync_mode)
1028{
1029        unsigned ctlr;
1030
1031        ctlr = EDMA_CTLR(slot);
1032        slot = EDMA_CHAN_SLOT(slot);
1033
1034        if (slot < edma_info[ctlr]->num_slots) {
1035                edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
1036                                0x0000ffff, bcnt_rld << 16);
1037                if (sync_mode == ASYNC)
1038                        edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
1039                else
1040                        edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
1041                /* Set the acount, bcount, ccount registers */
1042                edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
1043                edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
1044        }
1045}
1046EXPORT_SYMBOL(edma_set_transfer_params);
1047
1048/**
1049 * edma_link - link one parameter RAM slot to another
1050 * @from: parameter RAM slot originating the link
1051 * @to: parameter RAM slot which is the link target
1052 *
1053 * The originating slot should not be part of any active DMA transfer.
1054 */
1055void edma_link(unsigned from, unsigned to)
1056{
1057        unsigned ctlr_from, ctlr_to;
1058
1059        ctlr_from = EDMA_CTLR(from);
1060        from = EDMA_CHAN_SLOT(from);
1061        ctlr_to = EDMA_CTLR(to);
1062        to = EDMA_CHAN_SLOT(to);
1063
1064        if (from >= edma_info[ctlr_from]->num_slots)
1065                return;
1066        if (to >= edma_info[ctlr_to]->num_slots)
1067                return;
1068        edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
1069                                PARM_OFFSET(to));
1070}
1071EXPORT_SYMBOL(edma_link);
1072
1073/**
1074 * edma_unlink - cut link from one parameter RAM slot
1075 * @from: parameter RAM slot originating the link
1076 *
1077 * The originating slot should not be part of any active DMA transfer.
1078 * Its link is set to 0xffff.
1079 */
1080void edma_unlink(unsigned from)
1081{
1082        unsigned ctlr;
1083
1084        ctlr = EDMA_CTLR(from);
1085        from = EDMA_CHAN_SLOT(from);
1086
1087        if (from >= edma_info[ctlr]->num_slots)
1088                return;
1089        edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1090}
1091EXPORT_SYMBOL(edma_unlink);
1092
1093/*-----------------------------------------------------------------------*/
1094
1095/* Parameter RAM operations (ii) -- read/write whole parameter sets */
1096
1097/**
1098 * edma_write_slot - write parameter RAM data for slot
1099 * @slot: number of parameter RAM slot being modified
1100 * @param: data to be written into parameter RAM slot
1101 *
1102 * Use this to assign all parameters of a transfer at once.  This
1103 * allows more efficient setup of transfers than issuing multiple
1104 * calls to set up those parameters in small pieces, and provides
1105 * complete control over all transfer options.
1106 */
1107void edma_write_slot(unsigned slot, const struct edmacc_param *param)
1108{
1109        unsigned ctlr;
1110
1111        ctlr = EDMA_CTLR(slot);
1112        slot = EDMA_CHAN_SLOT(slot);
1113
1114        if (slot >= edma_info[ctlr]->num_slots)
1115                return;
1116        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
1117                        PARM_SIZE);
1118}
1119EXPORT_SYMBOL(edma_write_slot);
1120
1121/**
1122 * edma_read_slot - read parameter RAM data from slot
1123 * @slot: number of parameter RAM slot being copied
1124 * @param: where to store copy of parameter RAM data
1125 *
1126 * Use this to read data from a parameter RAM slot, perhaps to
1127 * save them as a template for later reuse.
1128 */
1129void edma_read_slot(unsigned slot, struct edmacc_param *param)
1130{
1131        unsigned ctlr;
1132
1133        ctlr = EDMA_CTLR(slot);
1134        slot = EDMA_CHAN_SLOT(slot);
1135
1136        if (slot >= edma_info[ctlr]->num_slots)
1137                return;
1138        memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1139                        PARM_SIZE);
1140}
1141EXPORT_SYMBOL(edma_read_slot);
1142
1143/*-----------------------------------------------------------------------*/
1144
1145/* Various EDMA channel control operations */
1146
1147/**
1148 * edma_pause - pause dma on a channel
1149 * @channel: on which edma_start() has been called
1150 *
1151 * This temporarily disables EDMA hardware events on the specified channel,
1152 * preventing them from triggering new transfers on its behalf
1153 */
1154void edma_pause(unsigned channel)
1155{
1156        unsigned ctlr;
1157
1158        ctlr = EDMA_CTLR(channel);
1159        channel = EDMA_CHAN_SLOT(channel);
1160
1161        if (channel < edma_info[ctlr]->num_channels) {
1162                unsigned int mask = (1 << (channel & 0x1f));
1163
1164                edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1165        }
1166}
1167EXPORT_SYMBOL(edma_pause);
1168
1169/**
1170 * edma_resume - resumes dma on a paused channel
1171 * @channel: on which edma_pause() has been called
1172 *
1173 * This re-enables EDMA hardware events on the specified channel.
1174 */
1175void edma_resume(unsigned channel)
1176{
1177        unsigned ctlr;
1178
1179        ctlr = EDMA_CTLR(channel);
1180        channel = EDMA_CHAN_SLOT(channel);
1181
1182        if (channel < edma_info[ctlr]->num_channels) {
1183                unsigned int mask = (1 << (channel & 0x1f));
1184
1185                edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1186        }
1187}
1188EXPORT_SYMBOL(edma_resume);
1189
1190/**
1191 * edma_start - start dma on a channel
1192 * @channel: channel being activated
1193 *
1194 * Channels with event associations will be triggered by their hardware
1195 * events, and channels without such associations will be triggered by
1196 * software.  (At this writing there is no interface for using software
1197 * triggers except with channels that don't support hardware triggers.)
1198 *
1199 * Returns zero on success, else negative errno.
1200 */
1201int edma_start(unsigned channel)
1202{
1203        unsigned ctlr;
1204
1205        ctlr = EDMA_CTLR(channel);
1206        channel = EDMA_CHAN_SLOT(channel);
1207
1208        if (channel < edma_info[ctlr]->num_channels) {
1209                int j = channel >> 5;
1210                unsigned int mask = (1 << (channel & 0x1f));
1211
1212                /* EDMA channels without event association */
1213                if (test_bit(channel, edma_info[ctlr]->edma_noevent)) {
1214                        pr_debug("EDMA: ESR%d %08x\n", j,
1215                                edma_shadow0_read_array(ctlr, SH_ESR, j));
1216                        edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1217                        return 0;
1218                }
1219
1220                /* EDMA channel with event association */
1221                pr_debug("EDMA: ER%d %08x\n", j,
1222                        edma_shadow0_read_array(ctlr, SH_ER, j));
1223                /* Clear any pending error */
1224                edma_write_array(ctlr, EDMA_EMCR, j, mask);
1225                /* Clear any SER */
1226                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1227                edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1228                pr_debug("EDMA: EER%d %08x\n", j,
1229                        edma_shadow0_read_array(ctlr, SH_EER, j));
1230                return 0;
1231        }
1232
1233        return -EINVAL;
1234}
1235EXPORT_SYMBOL(edma_start);
1236
1237/**
1238 * edma_stop - stops dma on the channel passed
1239 * @channel: channel being deactivated
1240 *
1241 * When @lch is a channel, any active transfer is paused and
1242 * all pending hardware events are cleared.  The current transfer
1243 * may not be resumed, and the channel's Parameter RAM should be
1244 * reinitialized before being reused.
1245 */
1246void edma_stop(unsigned channel)
1247{
1248        unsigned ctlr;
1249
1250        ctlr = EDMA_CTLR(channel);
1251        channel = EDMA_CHAN_SLOT(channel);
1252
1253        if (channel < edma_info[ctlr]->num_channels) {
1254                int j = channel >> 5;
1255                unsigned int mask = (1 << (channel & 0x1f));
1256
1257                edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1258                edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1259                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1260                edma_write_array(ctlr, EDMA_EMCR, j, mask);
1261
1262                pr_debug("EDMA: EER%d %08x\n", j,
1263                                edma_shadow0_read_array(ctlr, SH_EER, j));
1264
1265                /* REVISIT:  consider guarding against inappropriate event
1266                 * chaining by overwriting with dummy_paramset.
1267                 */
1268        }
1269}
1270EXPORT_SYMBOL(edma_stop);
1271
1272/******************************************************************************
1273 *
1274 * It cleans ParamEntry qand bring back EDMA to initial state if media has
1275 * been removed before EDMA has finished.It is usedful for removable media.
1276 * Arguments:
1277 *      ch_no     - channel no
1278 *
1279 * Return: zero on success, or corresponding error no on failure
1280 *
1281 * FIXME this should not be needed ... edma_stop() should suffice.
1282 *
1283 *****************************************************************************/
1284
1285void edma_clean_channel(unsigned channel)
1286{
1287        unsigned ctlr;
1288
1289        ctlr = EDMA_CTLR(channel);
1290        channel = EDMA_CHAN_SLOT(channel);
1291
1292        if (channel < edma_info[ctlr]->num_channels) {
1293                int j = (channel >> 5);
1294                unsigned int mask = 1 << (channel & 0x1f);
1295
1296                pr_debug("EDMA: EMR%d %08x\n", j,
1297                                edma_read_array(ctlr, EDMA_EMR, j));
1298                edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1299                /* Clear the corresponding EMR bits */
1300                edma_write_array(ctlr, EDMA_EMCR, j, mask);
1301                /* Clear any SER */
1302                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1303                edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
1304        }
1305}
1306EXPORT_SYMBOL(edma_clean_channel);
1307
1308/*
1309 * edma_clear_event - clear an outstanding event on the DMA channel
1310 * Arguments:
1311 *      channel - channel number
1312 */
1313void edma_clear_event(unsigned channel)
1314{
1315        unsigned ctlr;
1316
1317        ctlr = EDMA_CTLR(channel);
1318        channel = EDMA_CHAN_SLOT(channel);
1319
1320        if (channel >= edma_info[ctlr]->num_channels)
1321                return;
1322        if (channel < 32)
1323                edma_write(ctlr, EDMA_ECR, 1 << channel);
1324        else
1325                edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
1326}
1327EXPORT_SYMBOL(edma_clear_event);
1328
1329/*-----------------------------------------------------------------------*/
1330
1331static int __init edma_probe(struct platform_device *pdev)
1332{
1333        struct edma_soc_info    *info = pdev->dev.platform_data;
1334        const s8                (*queue_priority_mapping)[2];
1335        const s8                (*queue_tc_mapping)[2];
1336        int                     i, j, found = 0;
1337        int                     status = -1;
1338        const s8                *noevent;
1339        int                     irq[EDMA_MAX_CC] = {0, 0};
1340        int                     err_irq[EDMA_MAX_CC] = {0, 0};
1341        struct resource         *r[EDMA_MAX_CC] = {NULL};
1342        resource_size_t         len[EDMA_MAX_CC];
1343        char                    res_name[10];
1344        char                    irq_name[10];
1345
1346        if (!info)
1347                return -ENODEV;
1348
1349        for (j = 0; j < EDMA_MAX_CC; j++) {
1350                sprintf(res_name, "edma_cc%d", j);
1351                r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1352                                                res_name);
1353                if (!r[j]) {
1354                        if (found)
1355                                break;
1356                        else
1357                                return -ENODEV;
1358                } else
1359                        found = 1;
1360
1361                len[j] = resource_size(r[j]);
1362
1363                r[j] = request_mem_region(r[j]->start, len[j],
1364                        dev_name(&pdev->dev));
1365                if (!r[j]) {
1366                        status = -EBUSY;
1367                        goto fail1;
1368                }
1369
1370                edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
1371                if (!edmacc_regs_base[j]) {
1372                        status = -EBUSY;
1373                        goto fail1;
1374                }
1375
1376                edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
1377                if (!edma_info[j]) {
1378                        status = -ENOMEM;
1379                        goto fail1;
1380                }
1381                memset(edma_info[j], 0, sizeof(struct edma));
1382
1383                edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
1384                                                        EDMA_MAX_DMACH);
1385                edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
1386                                                        EDMA_MAX_PARAMENTRY);
1387                edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
1388                                                        EDMA_MAX_CC);
1389
1390                edma_info[j]->default_queue = info[j].default_queue;
1391                if (!edma_info[j]->default_queue)
1392                        edma_info[j]->default_queue = EVENTQ_1;
1393
1394                dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
1395                        edmacc_regs_base[j]);
1396
1397                for (i = 0; i < edma_info[j]->num_slots; i++)
1398                        memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
1399                                        &dummy_paramset, PARM_SIZE);
1400
1401                noevent = info[j].noevent;
1402                if (noevent) {
1403                        while (*noevent != -1)
1404                                set_bit(*noevent++, edma_info[j]->edma_noevent);
1405                }
1406
1407                sprintf(irq_name, "edma%d", j);
1408                irq[j] = platform_get_irq_byname(pdev, irq_name);
1409                edma_info[j]->irq_res_start = irq[j];
1410                status = request_irq(irq[j], dma_irq_handler, 0, "edma",
1411                                        &pdev->dev);
1412                if (status < 0) {
1413                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1414                                irq[j], status);
1415                        goto fail;
1416                }
1417
1418                sprintf(irq_name, "edma%d_err", j);
1419                err_irq[j] = platform_get_irq_byname(pdev, irq_name);
1420                edma_info[j]->irq_res_end = err_irq[j];
1421                status = request_irq(err_irq[j], dma_ccerr_handler, 0,
1422                                        "edma_error", &pdev->dev);
1423                if (status < 0) {
1424                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1425                                err_irq[j], status);
1426                        goto fail;
1427                }
1428
1429                /* Everything lives on transfer controller 1 until otherwise
1430                 * specified. This way, long transfers on the low priority queue
1431                 * started by the codec engine will not cause audio defects.
1432                 */
1433                for (i = 0; i < edma_info[j]->num_channels; i++)
1434                        map_dmach_queue(j, i, EVENTQ_1);
1435
1436                queue_tc_mapping = info[j].queue_tc_mapping;
1437                queue_priority_mapping = info[j].queue_priority_mapping;
1438
1439                /* Event queue to TC mapping */
1440                for (i = 0; queue_tc_mapping[i][0] != -1; i++)
1441                        map_queue_tc(j, queue_tc_mapping[i][0],
1442                                        queue_tc_mapping[i][1]);
1443
1444                /* Event queue priority mapping */
1445                for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1446                        assign_priority_to_queue(j,
1447                                                queue_priority_mapping[i][0],
1448                                                queue_priority_mapping[i][1]);
1449
1450                /* Map the channel to param entry if channel mapping logic
1451                 * exist
1452                 */
1453                if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
1454                        map_dmach_param(j);
1455
1456                for (i = 0; i < info[j].n_region; i++) {
1457                        edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
1458                        edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1459                        edma_write_array(j, EDMA_QRAE, i, 0x0);
1460                }
1461        }
1462
1463        if (tc_errs_handled) {
1464                status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
1465                                        "edma_tc0", &pdev->dev);
1466                if (status < 0) {
1467                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1468                                IRQ_TCERRINT0, status);
1469                        return status;
1470                }
1471                status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
1472                                        "edma_tc1", &pdev->dev);
1473                if (status < 0) {
1474                        dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
1475                                IRQ_TCERRINT, status);
1476                        return status;
1477                }
1478        }
1479
1480        return 0;
1481
1482fail:
1483        for (i = 0; i < EDMA_MAX_CC; i++) {
1484                if (err_irq[i])
1485                        free_irq(err_irq[i], &pdev->dev);
1486                if (irq[i])
1487                        free_irq(irq[i], &pdev->dev);
1488        }
1489fail1:
1490        for (i = 0; i < EDMA_MAX_CC; i++) {
1491                if (r[i])
1492                        release_mem_region(r[i]->start, len[i]);
1493                if (edmacc_regs_base[i])
1494                        iounmap(edmacc_regs_base[i]);
1495                kfree(edma_info[i]);
1496        }
1497        return status;
1498}
1499
1500
/* Bound via platform_driver_probe() at init; no remove/suspend support. */
static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};
1504
/* Probe the single "edma" platform device early (arch_initcall) so DMA
 * is available to other drivers by the time they initialize. */
static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);
1510
1511