linux/drivers/dma/ep93xx_dma.c
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL                     0x0000
#define M2P_CONTROL_STALLINT            BIT(0)
#define M2P_CONTROL_NFBINT              BIT(1)
#define M2P_CONTROL_CH_ERROR_INT        BIT(3)
#define M2P_CONTROL_ENABLE              BIT(4)
#define M2P_CONTROL_ICE                 BIT(6)

#define M2P_INTERRUPT                   0x0004
#define M2P_INTERRUPT_STALL             BIT(0)
#define M2P_INTERRUPT_NFB               BIT(1)
#define M2P_INTERRUPT_ERROR             BIT(3)

#define M2P_PPALLOC                     0x0008
#define M2P_STATUS                      0x000c

#define M2P_MAXCNT0                     0x0020
#define M2P_BASE0                       0x0024
#define M2P_MAXCNT1                     0x0030
#define M2P_BASE1                       0x0034

#define M2P_STATE_IDLE                  0
#define M2P_STATE_STALL                 1
#define M2P_STATE_ON                    2
#define M2P_STATE_NEXT                  3

/* M2M registers */
#define M2M_CONTROL                     0x0000
#define M2M_CONTROL_DONEINT             BIT(2)
#define M2M_CONTROL_ENABLE              BIT(3)
#define M2M_CONTROL_START               BIT(4)
#define M2M_CONTROL_DAH                 BIT(11)
#define M2M_CONTROL_SAH                 BIT(12)
#define M2M_CONTROL_PW_SHIFT            9
#define M2M_CONTROL_PW_8                (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16               (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32               (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK             (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT            13
#define M2M_CONTROL_TM_TX               (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX               (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT           22
#define M2M_CONTROL_RSS_SSPRX           (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX           (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE             (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK             BIT(24)
#define M2M_CONTROL_PWSC_SHIFT          25

#define M2M_INTERRUPT                   0x0004
#define M2M_INTERRUPT_DONEINT           BIT(1)

#define M2M_BCR0                        0x0010
#define M2M_BCR1                        0x0014
#define M2M_SAR_BASE0                   0x0018
#define M2M_SAR_BASE1                   0x001c
#define M2M_DAR_BASE0                   0x002c
#define M2M_DAR_BASE1                   0x0030

#define DMA_MAX_CHAN_BYTES              0xffff
#define DMA_MAX_CHAN_DESCRIPTORS        32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
        u32                             src_addr;
        u32                             dst_addr;
        size_t                          size;
        bool                            complete;
        struct dma_async_tx_descriptor  txd;
        struct list_head                tx_list;
        struct list_head                node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
        struct dma_chan                 chan;
        const struct ep93xx_dma_engine  *edma;
        void __iomem                    *regs;
        int                             irq;
        struct clk                      *clk;
        struct tasklet_struct           tasklet;
        /* protects the fields following */
        spinlock_t                      lock;
        unsigned long                   flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC            0

        int                             buffer;
        struct list_head                active;
        struct list_head                queue;
        struct list_head                free_list;
        u32                             runtime_addr;
        u32                             runtime_ctrl;
};
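
/*
 * Illustrative sketch (not part of this driver): a slave client would
 * typically pass the channel configuration in via @chan.private by
 * setting it from a dma_request_channel() filter function. The names
 * my_filter and chan below are hypothetical:
 *
 *      static bool my_filter(struct dma_chan *chan, void *filter_param)
 *      {
 *              chan->private = filter_param;
 *              return true;
 *      }
 *
 *      struct ep93xx_dma_data data = {
 *              .port           = EP93XX_DMA_SSP,
 *              .direction      = DMA_MEM_TO_DEV,
 *              .name           = "ssp-tx",
 *      };
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, my_filter, &data);
 */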

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN       0
#define INTERRUPT_DONE          1
#define INTERRUPT_NEXT_BUFFER   2

        size_t                  num_channels;
        struct ep93xx_dma_chan  channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
        return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
                                  struct ep93xx_dma_desc *desc)
{
        BUG_ON(!list_empty(&edmac->active));

        list_add_tail(&desc->node, &edmac->active);

        /* Flatten the @desc->tx_list chain into @edmac->active list */
        while (!list_empty(&desc->tx_list)) {
                struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
                        struct ep93xx_dma_desc, node);

                /*
                 * We copy the callback parameters from the first descriptor
                 * to all the chained descriptors. This way we can call the
                 * callback without having to find out the first descriptor in
                 * the chain. Useful for cyclic transfers.
                 */
                d->txd.callback = desc->txd.callback;
                d->txd.callback_param = desc->txd.callback_param;

                list_move_tail(&d->node, &edmac->active);
        }
}
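
/*
 * For example, if a prepared transaction has descriptor d0 as its head
 * (holding the cookie) and d1 and d2 linked on d0->tx_list, then after
 * ep93xx_dma_set_active() the @active list is simply d0 -> d1 -> d2 and
 * the hw_* methods only ever operate on the list head.
 */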

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
        if (list_empty(&edmac->active))
                return NULL;

        return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

        list_rotate_left(&edmac->active);

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc)
                return false;

        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
        return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
        writel(control, edmac->regs + M2P_CONTROL);
        /*
         * The EP93xx User's Guide states that we must perform a dummy read
         * after a write to the control register.
         */
        readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control;

        writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

        control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);

        return 0;
}

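/*
 * The channel state is encoded in bits [5:4] of the M2P_STATUS register;
 * the value returned here is one of the M2P_STATE_* constants above.
 */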
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        u32 control;

        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);

        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
                cpu_relax();

        m2p_set_control(edmac, 0);

        while (m2p_channel_state(edmac) == M2P_STATE_STALL)
                cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;
        u32 bus_addr;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
                return;
        }

        if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;

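        /*
         * The M2P channel has two descriptor register sets (BASE0/MAXCNT0
         * and BASE1/MAXCNT1) which the hardware consumes alternately;
         * @buffer selects the set to program next.
         */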
        if (edmac->buffer == 0) {
                writel(desc->size, edmac->regs + M2P_MAXCNT0);
                writel(bus_addr, edmac->regs + M2P_BASE0);
        } else {
                writel(desc->size, edmac->regs + M2P_MAXCNT1);
                writel(bus_addr, edmac->regs + M2P_BASE1);
        }

        edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
        u32 control = readl(edmac->regs + M2P_CONTROL);

        m2p_fill_desc(edmac);
        control |= M2P_CONTROL_STALLINT;

        if (ep93xx_dma_advance_active(edmac)) {
                m2p_fill_desc(edmac);
                control |= M2P_CONTROL_NFBINT;
        }

        m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
        u32 control;

        if (irq_status & M2P_INTERRUPT_ERROR) {
                struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

                /* Clear the error interrupt */
                writel(1, edmac->regs + M2P_INTERRUPT);

                /*
                 * It seems that there is no easy way of reporting errors back
                 * to the client so we just report the error here and continue
                 * as usual.
                 *
                 * Revisit this when there is a mechanism to report back the
                 * errors.
                 */
                dev_err(chan2dev(edmac),
                        "DMA transfer failed! Details:\n"
                        "\tcookie       : %d\n"
                        "\tsrc_addr     : 0x%08x\n"
                        "\tdst_addr     : 0x%08x\n"
                        "\tsize         : %zu\n",
                        desc->txd.cookie, desc->src_addr, desc->dst_addr,
                        desc->size);
        }

        switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
        case M2P_INTERRUPT_STALL:
                /* Disable interrupts */
                control = readl(edmac->regs + M2P_CONTROL);
                control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
                m2p_set_control(edmac, control);

                return INTERRUPT_DONE;

        case M2P_INTERRUPT_NFB:
                if (ep93xx_dma_advance_active(edmac))
                        m2p_fill_desc(edmac);

                return INTERRUPT_NEXT_BUFFER;
        }

        return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 *
 * For M2M transfers we don't use NFB at all. This is because it simply
 * doesn't work well with memcpy transfers. When both buffers are submitted,
 * it is extremely unlikely that we get an NFB interrupt; instead, a DONE
 * interrupt is reported with both buffers already transferred, which means
 * that we weren't able to update the next buffer in time.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
        const struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = 0;

        if (!data) {
                /* This is a memcpy channel, nothing to configure */
                writel(control, edmac->regs + M2M_CONTROL);
                return 0;
        }

        switch (data->port) {
        case EP93XX_DMA_SSP:
                /*
                 * This was found by experimenting - anything less than 5
                 * causes the channel to perform only a partial transfer,
                 * which leads to problems since we then don't get the DONE
                 * interrupt.
                 */
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;

                if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
                } else {
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                        control |= M2M_CONTROL_RSS_SSPRX;
                }
                break;

        case EP93XX_DMA_IDE:
                /*
                 * This IDE part is totally untested. Values below are taken
                 * from the EP93xx User's Guide and might not be correct.
                 */
                if (data->direction == DMA_MEM_TO_DEV) {
                        /* Worst case from the UG */
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                } else {
                        control = (2 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }

                control |= M2M_CONTROL_NO_HDSK;
                control |= M2M_CONTROL_RSS_IDE;
                control |= M2M_CONTROL_PW_16;
                break;

        default:
                return -EINVAL;
        }

        writel(control, edmac->regs + M2M_CONTROL);
        return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        /* Just disable the channel */
        writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
                return;
        }

        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
                writel(desc->size, edmac->regs + M2M_BCR0);
        } else {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
                writel(desc->size, edmac->regs + M2M_BCR1);
        }

        edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = readl(edmac->regs + M2M_CONTROL);

        /*
         * Since we allow clients to configure PW (peripheral width) we always
         * clear PW bits here and then set them according to what is given in
         * the runtime configuration.
         */
        control &= ~M2M_CONTROL_PW_MASK;
        control |= edmac->runtime_ctrl;

        m2m_fill_desc(edmac);
        control |= M2M_CONTROL_DONEINT;

        /*
         * Now we can finally enable the channel. For an M2M channel this must
         * be done _after_ the BCRx registers are programmed.
         */
        control |= M2M_CONTROL_ENABLE;
        writel(control, edmac->regs + M2M_CONTROL);

        if (!data) {
                /*
                 * For memcpy channels the software trigger must be asserted
                 * in order to start the memcpy operation.
                 */
                control |= M2M_CONTROL_START;
                writel(control, edmac->regs + M2M_CONTROL);
        }
}

static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 control;

        if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
                return INTERRUPT_UNKNOWN;

        /* Clear the DONE bit */
        writel(0, edmac->regs + M2M_INTERRUPT);

        /* Disable interrupts and the channel */
        control = readl(edmac->regs + M2M_CONTROL);
        control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
        writel(control, edmac->regs + M2M_CONTROL);

        /*
         * Since we only get the DONE interrupt we have to find out ourselves
         * whether there is still something to process. So we try to advance
         * the chain and see whether it succeeds.
         */
        if (ep93xx_dma_advance_active(edmac)) {
                edmac->edma->hw_submit(edmac);
                return INTERRUPT_NEXT_BUFFER;
        }

        return INTERRUPT_DONE;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc, *_desc;
        struct ep93xx_dma_desc *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del_init(&desc->node);

                        /* Re-initialize the descriptor */
                        desc->src_addr = 0;
                        desc->dst_addr = 0;
                        desc->size = 0;
                        desc->complete = false;
                        desc->txd.cookie = 0;
                        desc->txd.callback = NULL;
                        desc->txd.callback_param = NULL;

                        ret = desc;
                        break;
                }
        }
        spin_unlock_irqrestore(&edmac->lock, flags);
        return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
                                struct ep93xx_dma_desc *desc)
{
        if (desc) {
                unsigned long flags;

                spin_lock_irqsave(&edmac->lock, flags);
                list_splice_init(&desc->tx_list, &edmac->free_list);
                list_add(&desc->node, &edmac->free_list);
                spin_unlock_irqrestore(&edmac->lock, flags);
        }
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *new;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
                spin_unlock_irqrestore(&edmac->lock, flags);
                return;
        }

        /* Take the next descriptor from the pending queue */
        new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
        list_del_init(&new->node);

        ep93xx_dma_set_active(edmac, new);

        /* Push it to the hardware */
        edmac->edma->hw_submit(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
        struct device *dev = desc->txd.chan->device->dev;

        if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                        dma_unmap_single(dev, desc->src_addr, desc->size,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, desc->src_addr, desc->size,
                                       DMA_TO_DEVICE);
        }
        if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                        dma_unmap_single(dev, desc->dst_addr, desc->size,
                                         DMA_FROM_DEVICE);
                else
                        dma_unmap_page(dev, desc->dst_addr, desc->size,
                                       DMA_FROM_DEVICE);
        }
}

static void ep93xx_dma_tasklet(unsigned long data)
{
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
        dma_async_tx_callback callback = NULL;
        void *callback_param = NULL;
        LIST_HEAD(list);

        spin_lock_irq(&edmac->lock);
        /*
         * If dmaengine_terminate_all() was called before we get to run, the
         * active list has become empty. If that happens we aren't supposed
         * to do anything more than call ep93xx_dma_advance_work().
         */
        desc = ep93xx_dma_get_active(edmac);
        if (desc) {
                if (desc->complete) {
                        /* mark descriptor complete for non-cyclic case only */
                        if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                                dma_cookie_complete(&desc->txd);
                        list_splice_init(&edmac->active, &list);
                }
                callback = desc->txd.callback;
                callback_param = desc->txd.callback_param;
        }
        spin_unlock_irq(&edmac->lock);

        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);

        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                /*
                 * For the memcpy channels the API requires us to unmap the
                 * buffers unless requested otherwise.
                 */
                if (!edmac->chan.private)
                        ep93xx_dma_unmap_buffers(desc);

                ep93xx_dma_desc_put(edmac, desc);
        }

        if (callback)
                callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
        struct ep93xx_dma_chan *edmac = dev_id;
        struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;

        spin_lock(&edmac->lock);

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac),
                         "got interrupt while active list is empty\n");
                spin_unlock(&edmac->lock);
                return IRQ_NONE;
        }

        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
                desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;

        case INTERRUPT_NEXT_BUFFER:
                if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                        tasklet_schedule(&edmac->tasklet);
                break;

        default:
                dev_warn(chan2dev(edmac), "unknown interrupt!\n");
                ret = IRQ_NONE;
                break;
        }

        spin_unlock(&edmac->lock);
        return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
        struct ep93xx_dma_desc *desc;
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        cookie = dma_cookie_assign(tx);

        desc = container_of(tx, struct ep93xx_dma_desc, txd);

        /*
         * If nothing is currently being processed, we push this descriptor
         * directly to the hardware. Otherwise we put the descriptor on the
         * pending queue.
         */
        if (list_empty(&edmac->active)) {
                ep93xx_dma_set_active(edmac, desc);
                edmac->edma->hw_submit(edmac);
        } else {
                list_add_tail(&desc->node, &edmac->queue);
        }

        spin_unlock_irqrestore(&edmac->lock, flags);
        return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_data *data = chan->private;
        const char *name = dma_chan_name(chan);
        int ret, i;

        /* Sanity check the channel parameters */
        if (!edmac->edma->m2m) {
                if (!data)
                        return -EINVAL;
                if (data->port < EP93XX_DMA_I2S1 ||
                    data->port > EP93XX_DMA_IRDA)
                        return -EINVAL;
                if (data->direction != ep93xx_dma_chan_direction(chan))
                        return -EINVAL;
        } else {
                if (data) {
                        switch (data->port) {
                        case EP93XX_DMA_SSP:
                        case EP93XX_DMA_IDE:
                                if (data->direction != DMA_MEM_TO_DEV &&
                                    data->direction != DMA_DEV_TO_MEM)
                                        return -EINVAL;
                                break;
                        default:
                                return -EINVAL;
                        }
                }
        }

        if (data && data->name)
                name = data->name;

        ret = clk_enable(edmac->clk);
        if (ret)
                return ret;

        ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
        if (ret)
                goto fail_clk_disable;

        spin_lock_irq(&edmac->lock);
        dma_cookie_init(&edmac->chan);
        ret = edmac->edma->hw_setup(edmac);
        spin_unlock_irq(&edmac->lock);

        if (ret)
                goto fail_free_irq;

        for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
                struct ep93xx_dma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "not enough descriptors\n");
                        break;
                }

                INIT_LIST_HEAD(&desc->tx_list);

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = ep93xx_dma_tx_submit;

                ep93xx_dma_desc_put(edmac, desc);
        }

        return i;

fail_free_irq:
        free_irq(edmac->irq, edmac);
fail_clk_disable:
        clk_disable(edmac->clk);

        return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *d;
        unsigned long flags;
        LIST_HEAD(list);

        BUG_ON(!list_empty(&edmac->active));
        BUG_ON(!list_empty(&edmac->queue));

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->edma->hw_shutdown(edmac);
        edmac->runtime_addr = 0;
        edmac->runtime_ctrl = 0;
        edmac->buffer = 0;
        list_splice_init(&edmac->free_list, &list);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, d, &list, node)
                kfree(desc);

        clk_disable(edmac->clk);
        free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                           dma_addr_t src, size_t len, unsigned long flags)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t bytes, offset;

        first = NULL;
        for (offset = 0; offset < len; offset += bytes) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

                desc->src_addr = src + offset;
                desc->dst_addr = dest + offset;
                desc->size = bytes;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;
fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                         unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags, void *context)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        struct scatterlist *sg;
        int i;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        first = NULL;
        for_each_sg(sgl, sg, sg_len, i) {
                size_t len = sg_dma_len(sg);

                if (len > DMA_MAX_CHAN_BYTES) {
                        dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
                                 len);
                        goto fail;
                }

                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = sg_dma_address(sg);
                }
                desc->size = len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}
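
/*
 * Illustrative sketch (not part of this driver): after mapping a
 * scatterlist, a client would normally reach this function through the
 * generic dmaengine wrappers. The names txd, my_callback and cookie are
 * hypothetical:
 *
 *      txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *                                    DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 *      if (txd) {
 *              txd->callback = my_callback;
 *              cookie = dmaengine_submit(txd);
 *              dma_async_issue_pending(chan);
 *      }
 */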

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once each period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                           size_t buf_len, size_t period_len,
                           enum dma_transfer_direction dir, void *context)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t offset = 0;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        if (period_len > DMA_MAX_CHAN_BYTES) {
                dev_warn(chan2dev(edmac), "too big period length %zu\n",
                         period_len);
                return NULL;
        }

        /* Split the buffer into period size chunks */
        first = NULL;
        for (offset = 0; offset < buf_len; offset += period_len) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = dma_addr + offset;
                }

                desc->size = period_len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}
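
/*
 * Illustrative sketch (not part of this driver): a cyclic client (for
 * example an audio driver) would typically prepare the transfer with the
 * generic wrapper, the callback then firing once per elapsed period:
 *
 *      txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *                                      DMA_MEM_TO_DEV);
 */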

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back on the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc, *_d;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&edmac->lock, flags);
        /* First we disable and flush the DMA channel */
        edmac->edma->hw_shutdown(edmac);
        clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
        list_splice_init(&edmac->active, &list);
        list_splice_init(&edmac->queue, &list);
        /*
         * We then re-enable the channel. This way we can continue submitting
         * the descriptors by just calling ->hw_submit() again.
         */
        edmac->edma->hw_setup(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, _d, &list, node)
                ep93xx_dma_desc_put(edmac, desc);

        return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
                                   struct dma_slave_config *config)
{
        enum dma_slave_buswidth width;
        unsigned long flags;
        u32 addr, ctrl;

        if (!edmac->edma->m2m)
                return -EINVAL;

        switch (config->direction) {
        case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;

        case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;

        default:
                return -EINVAL;
        }

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                ctrl = 0;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                ctrl = M2M_CONTROL_PW_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                ctrl = M2M_CONTROL_PW_32;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->runtime_addr = addr;
        edmac->runtime_ctrl = ctrl;
        spin_unlock_irqrestore(&edmac->lock, flags);

        return 0;
}
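
/*
 * Illustrative sketch (not part of this driver): an M2M slave client
 * would set the runtime address and width with dmaengine_slave_config()
 * before preparing a transfer. ssp_dr_phys below is a hypothetical
 * physical address of the peripheral data register:
 *
 *      struct dma_slave_config conf = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = ssp_dr_phys,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *      };
 *      dmaengine_slave_config(chan, &conf);
 */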

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                              unsigned long arg)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct dma_slave_config *config;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                return ep93xx_dma_terminate_all(edmac);

        case DMA_SLAVE_CONFIG:
                config = (struct dma_slave_config *)arg;
                return ep93xx_dma_slave_config(edmac, config);

        default:
                break;
        }

        return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *state)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        enum dma_status ret;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        ret = dma_cookie_status(chan, cookie, state);
        spin_unlock_irqrestore(&edmac->lock, flags);

        return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
        ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
        struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct ep93xx_dma_engine *edma;
        struct dma_device *dma_dev;
        size_t edma_size;
        int ret, i;

        edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
        edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
        if (!edma)
                return -ENOMEM;

        dma_dev = &edma->dma_dev;
        edma->m2m = platform_get_device_id(pdev)->driver_data;
        edma->num_channels = pdata->num_channels;

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < pdata->num_channels; i++) {
                const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
                struct ep93xx_dma_chan *edmac = &edma->channels[i];

                edmac->chan.device = dma_dev;
                edmac->regs = cdata->base;
                edmac->irq = cdata->irq;
                edmac->edma = edma;

                edmac->clk = clk_get(NULL, cdata->name);
                if (IS_ERR(edmac->clk)) {
                        dev_warn(&pdev->dev, "failed to get clock for %s\n",
                                 cdata->name);
                        continue;
                }

                spin_lock_init(&edmac->lock);
                INIT_LIST_HEAD(&edmac->active);
                INIT_LIST_HEAD(&edmac->queue);
                INIT_LIST_HEAD(&edmac->free_list);
                tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
                             (unsigned long)edmac);

                list_add_tail(&edmac->chan.device_node,
                              &dma_dev->channels);
        }

        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

        dma_dev->dev = &pdev->dev;
        dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_control = ep93xx_dma_control;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;

        dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

        if (edma->m2m) {
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
                dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

                edma->hw_setup = m2m_hw_setup;
                edma->hw_shutdown = m2m_hw_shutdown;
                edma->hw_submit = m2m_hw_submit;
                edma->hw_interrupt = m2m_hw_interrupt;
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
                edma->hw_interrupt = m2p_hw_interrupt;
        }

        ret = dma_async_device_register(dma_dev);
        if (unlikely(ret)) {
                for (i = 0; i < edma->num_channels; i++) {
                        struct ep93xx_dma_chan *edmac = &edma->channels[i];
                        if (!IS_ERR_OR_NULL(edmac->clk))
                                clk_put(edmac->clk);
                }
                kfree(edma);
        } else {
                dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
                         edma->m2m ? "M" : "P");
        }

        return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
        { "ep93xx-dma-m2p", 0 },
        { "ep93xx-dma-m2m", 1 },
        { },
};

static struct platform_driver ep93xx_dma_driver = {
        .driver         = {
                .name   = "ep93xx-dma",
        },
        .id_table       = ep93xx_dma_driver_ids,
};

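/*
 * ep93xx_dma_probe() lives in __init memory, so the driver is registered
 * with platform_driver_probe() instead of platform_driver_register();
 * the DMA platform devices are never hot-plugged.
 */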
static int __init ep93xx_dma_module_init(void)
{
        return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");