linux/drivers/dma/ep93xx_dma.c
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL                     0x0000
#define M2P_CONTROL_STALLINT            BIT(0)
#define M2P_CONTROL_NFBINT              BIT(1)
#define M2P_CONTROL_CH_ERROR_INT        BIT(3)
#define M2P_CONTROL_ENABLE              BIT(4)
#define M2P_CONTROL_ICE                 BIT(6)

#define M2P_INTERRUPT                   0x0004
#define M2P_INTERRUPT_STALL             BIT(0)
#define M2P_INTERRUPT_NFB               BIT(1)
#define M2P_INTERRUPT_ERROR             BIT(3)

#define M2P_PPALLOC                     0x0008
#define M2P_STATUS                      0x000c

#define M2P_MAXCNT0                     0x0020
#define M2P_BASE0                       0x0024
#define M2P_MAXCNT1                     0x0030
#define M2P_BASE1                       0x0034

#define M2P_STATE_IDLE                  0
#define M2P_STATE_STALL                 1
#define M2P_STATE_ON                    2
#define M2P_STATE_NEXT                  3

/* M2M registers */
#define M2M_CONTROL                     0x0000
#define M2M_CONTROL_DONEINT             BIT(2)
#define M2M_CONTROL_ENABLE              BIT(3)
#define M2M_CONTROL_START               BIT(4)
#define M2M_CONTROL_DAH                 BIT(11)
#define M2M_CONTROL_SAH                 BIT(12)
#define M2M_CONTROL_PW_SHIFT            9
#define M2M_CONTROL_PW_8                (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16               (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32               (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK             (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT            13
#define M2M_CONTROL_TM_TX               (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX               (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT              BIT(21)
#define M2M_CONTROL_RSS_SHIFT           22
#define M2M_CONTROL_RSS_SSPRX           (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX           (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE             (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK             BIT(24)
#define M2M_CONTROL_PWSC_SHIFT          25

#define M2M_INTERRUPT                   0x0004
#define M2M_INTERRUPT_MASK              6

#define M2M_STATUS                      0x000c
#define M2M_STATUS_CTL_SHIFT            1
#define M2M_STATUS_CTL_IDLE             (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL            (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD            (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR            (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT          (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK             (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT            4
#define M2M_STATUS_BUF_NO               (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON               (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT             (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK             (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE                 BIT(6)

#define M2M_BCR0                        0x0010
#define M2M_BCR1                        0x0014
#define M2M_SAR_BASE0                   0x0018
#define M2M_SAR_BASE1                   0x001c
#define M2M_DAR_BASE0                   0x002c
#define M2M_DAR_BASE1                   0x0030

#define DMA_MAX_CHAN_BYTES              0xffff
#define DMA_MAX_CHAN_DESCRIPTORS        32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
        u32                             src_addr;
        u32                             dst_addr;
        size_t                          size;
        bool                            complete;
        struct dma_async_tx_descriptor  txd;
        struct list_head                tx_list;
        struct list_head                node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields that follow
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
        struct dma_chan                 chan;
        const struct ep93xx_dma_engine  *edma;
        void __iomem                    *regs;
        int                             irq;
        struct clk                      *clk;
        struct tasklet_struct           tasklet;
        /* protects the fields following */
        spinlock_t                      lock;
        unsigned long                   flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC            0

        int                             buffer;
        struct list_head                active;
        struct list_head                queue;
        struct list_head                free_list;
        u32                             runtime_addr;
        u32                             runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
        struct dma_device       dma_dev;
        bool                    m2m;
        int                     (*hw_setup)(struct ep93xx_dma_chan *);
        void                    (*hw_synchronize)(struct ep93xx_dma_chan *);
        void                    (*hw_shutdown)(struct ep93xx_dma_chan *);
        void                    (*hw_submit)(struct ep93xx_dma_chan *);
        int                     (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN       0
#define INTERRUPT_DONE          1
#define INTERRUPT_NEXT_BUFFER   2

        size_t                  num_channels;
        struct ep93xx_dma_chan  channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
        return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
                                  struct ep93xx_dma_desc *desc)
{
        BUG_ON(!list_empty(&edmac->active));

        list_add_tail(&desc->node, &edmac->active);

        /* Flatten the @desc->tx_list chain into @edmac->active list */
        while (!list_empty(&desc->tx_list)) {
                struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
                        struct ep93xx_dma_desc, node);

                /*
                 * We copy the callback parameters from the first descriptor
                 * to all the chained descriptors. This way we can call the
                 * callback without having to find out the first descriptor in
                 * the chain. Useful for cyclic transfers.
                 */
                d->txd.callback = desc->txd.callback;
                d->txd.callback_param = desc->txd.callback_param;

                list_move_tail(&d->node, &edmac->active);
        }
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
        return list_first_entry_or_null(&edmac->active,
                                        struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

        list_rotate_left(&edmac->active);

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc)
                return false;

        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
        return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
        writel(control, edmac->regs + M2P_CONTROL);
        /*
         * EP93xx User's Guide states that we must perform a dummy read after
         * a write to the control register.
         */
        readl(edmac->regs + M2P_CONTROL);
}

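/*
 * Allocate the peripheral port and enable the channel with error
 * interrupts on. @chan.private is known to be valid here because
 * ep93xx_dma_alloc_chan_resources() rejects M2P channels without
 * channel data.
 */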
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control;

        writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

        control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
                | M2P_CONTROL_ENABLE;
        m2p_set_control(edmac, control);

        edmac->buffer = 0;

        return 0;
}

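/* Current channel FSM state (M2P_STATE_*), bits [5:4] of M2P_STATUS */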
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
        return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

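/*
 * Mask the STALL and NFB interrupts and give up the CPU until the
 * channel has drained below the ON state. Called without the channel
 * lock, so sleeping here is fine.
 */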
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
        unsigned long flags;
        u32 control;

        spin_lock_irqsave(&edmac->lock, flags);
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);
        spin_unlock_irqrestore(&edmac->lock, flags);

        while (m2p_channel_state(edmac) >= M2P_STATE_ON)
                schedule();
}

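/*
 * Disable the channel and busy-wait until the hardware reports IDLE.
 * Unlike m2p_hw_synchronize() this runs with the channel lock held, so
 * it must not sleep.
 */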
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        m2p_set_control(edmac, 0);

        while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
                dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

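/*
 * Program the head of the active list into the next free hardware
 * buffer (the M2P channels are double buffered) and flip @buffer for
 * the following fill. Only the memory side address is written; the
 * peripheral side is fixed by the port allocation done in setup.
 */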
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;
        u32 bus_addr;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
                return;
        }

        if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;

        if (edmac->buffer == 0) {
                writel(desc->size, edmac->regs + M2P_MAXCNT0);
                writel(bus_addr, edmac->regs + M2P_BASE0);
        } else {
                writel(desc->size, edmac->regs + M2P_MAXCNT1);
                writel(bus_addr, edmac->regs + M2P_BASE1);
        }

        edmac->buffer ^= 1;
}

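/*
 * Load up to two descriptors into the hardware buffers. STALLINT fires
 * once the channel runs out of buffers, NFBINT as soon as it can accept
 * the next one.
 */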
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
        u32 control = readl(edmac->regs + M2P_CONTROL);

        m2p_fill_desc(edmac);
        control |= M2P_CONTROL_STALLINT;

        if (ep93xx_dma_advance_active(edmac)) {
                m2p_fill_desc(edmac);
                control |= M2P_CONTROL_NFBINT;
        }

        m2p_set_control(edmac, control);
}

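/*
 * Handle an M2P interrupt: log (but do not propagate) transfer errors,
 * refill the next hardware buffer while descriptors remain, otherwise
 * mask the interrupts and report completion.
 */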
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
        u32 control;

        if (irq_status & M2P_INTERRUPT_ERROR) {
                struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

                /* Clear the error interrupt */
                writel(1, edmac->regs + M2P_INTERRUPT);

                /*
                 * It seems that there is no easy way of reporting errors back
                 * to the client so we just report the error here and continue
                 * as usual.
                 *
                 * Revisit this when there is a mechanism to report back the
                 * errors.
                 */
                dev_err(chan2dev(edmac),
                        "DMA transfer failed! Details:\n"
                        "\tcookie       : %d\n"
                        "\tsrc_addr     : 0x%08x\n"
                        "\tdst_addr     : 0x%08x\n"
                        "\tsize         : %zu\n",
                        desc->txd.cookie, desc->src_addr, desc->dst_addr,
                        desc->size);
        }

        /*
         * Even the latest E2 silicon revision sometimes asserts the STALL
         * interrupt instead of NFB. Therefore we treat them equally, basing
         * the decision on the amount of data we still have to transfer.
         */
        if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
                return INTERRUPT_UNKNOWN;

        if (ep93xx_dma_advance_active(edmac)) {
                m2p_fill_desc(edmac);
                return INTERRUPT_NEXT_BUFFER;
        }

        /* Disable interrupts */
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);

        return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

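/*
 * Write the initial control word for the channel. A plain memcpy
 * channel (no private data) needs no configuration; slave channels get
 * their request source, transfer mode and address hold bits from the
 * channel data.
 */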
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
        const struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = 0;

        if (!data) {
                /* This is a memcpy channel, nothing to configure */
                writel(control, edmac->regs + M2M_CONTROL);
                return 0;
        }

        switch (data->port) {
        case EP93XX_DMA_SSP:
                /*
                 * This was found experimentally - anything less than 5
                 * causes the channel to perform only a partial transfer which
                 * leads to problems since we don't get the DONE interrupt
                 * then.
                 */
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;

                if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
                } else {
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                        control |= M2M_CONTROL_RSS_SSPRX;
                }
                break;

        case EP93XX_DMA_IDE:
                /*
                 * This IDE part is totally untested. Values below are taken
                 * from the EP93xx User's Guide and might not be correct.
                 */
                if (data->direction == DMA_MEM_TO_DEV) {
                        /* Worst case from the UG */
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                } else {
                        control = (2 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }

                control |= M2M_CONTROL_NO_HDSK;
                control |= M2M_CONTROL_RSS_IDE;
                control |= M2M_CONTROL_PW_16;
                break;

        default:
                return -EINVAL;
        }

        writel(control, edmac->regs + M2M_CONTROL);
        return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
        /* Just disable the channel */
        writel(0, edmac->regs + M2M_CONTROL);
}

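/*
 * Program the head of the active list into the next free buffer pair
 * (SAR_BASE/DAR_BASE/BCR 0 or 1) and flip @buffer for the next fill.
 */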
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc;

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
                return;
        }

        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
                writel(desc->size, edmac->regs + M2M_BCR0);
        } else {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
                writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
                writel(desc->size, edmac->regs + M2M_BCR1);
        }

        edmac->buffer ^= 1;
}

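/*
 * Apply the runtime (slave) configuration, load up to two descriptors
 * and enable the channel. Memcpy channels additionally need the
 * software trigger (START) to actually begin transferring.
 */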
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_data *data = edmac->chan.private;
        u32 control = readl(edmac->regs + M2M_CONTROL);

        /*
         * Since we allow clients to configure PW (peripheral width) we always
         * clear the PW bits here and then set them according to what is given
         * in the runtime configuration.
         */
        control &= ~M2M_CONTROL_PW_MASK;
        control |= edmac->runtime_ctrl;

        m2m_fill_desc(edmac);
        control |= M2M_CONTROL_DONEINT;

        if (ep93xx_dma_advance_active(edmac)) {
                m2m_fill_desc(edmac);
                control |= M2M_CONTROL_NFBINT;
        }

        /*
         * Now we can finally enable the channel. For M2M channels this must
         * be done _after_ the BCRx registers are programmed.
         */
        control |= M2M_CONTROL_ENABLE;
        writel(control, edmac->regs + M2M_CONTROL);

        if (!data) {
                /*
                 * For memcpy channels the software trigger must be asserted
                 * in order to start the memcpy operation.
                 */
                control |= M2M_CONTROL_START;
                writel(control, edmac->regs + M2M_CONTROL);
        }
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes EP93xx M2M DMA asserts DONE interrupt when the
 * DMA channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop a currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
        u32 status = readl(edmac->regs + M2M_STATUS);
        u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
        u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
        bool done = status & M2M_STATUS_DONE;
        bool last_done;
        u32 control;
        struct ep93xx_dma_desc *desc;

        /* Accept only DONE and NFB interrupts */
        if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
                return INTERRUPT_UNKNOWN;

        if (done) {
                /* Clear the DONE bit */
                writel(0, edmac->regs + M2M_INTERRUPT);
        }

        /*
         * Check whether we are done with descriptors or not. This, together
         * with the DMA channel state, determines the action to take in the
         * interrupt.
         */
        desc = ep93xx_dma_get_active(edmac);
        last_done = !desc || desc->txd.cookie;

        /*
         * Use M2M DMA Buffer FSM and Control FSM to check current state of
         * DMA channel. Using DONE and NFB bits from channel status register
         * or bits from channel interrupt register is not reliable.
         */
        if (!last_done &&
            (buf_fsm == M2M_STATUS_BUF_NO ||
             buf_fsm == M2M_STATUS_BUF_ON)) {
                /*
                 * Two buffers are ready for update when Buffer FSM is in
                 * DMA_NO_BUF state. Only one buffer can be prepared without
                 * disabling the channel or polling the DONE bit.
                 * To simplify things, always prepare only one buffer.
                 */
                if (ep93xx_dma_advance_active(edmac)) {
                        m2m_fill_desc(edmac);
                        if (done && !edmac->chan.private) {
                                /* Software trigger for memcpy channel */
                                control = readl(edmac->regs + M2M_CONTROL);
                                control |= M2M_CONTROL_START;
                                writel(control, edmac->regs + M2M_CONTROL);
                        }
                        return INTERRUPT_NEXT_BUFFER;
                } else {
                        last_done = true;
                }
        }

        /*
         * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
         * and Control FSM is in DMA_STALL state.
         */
        if (last_done &&
            buf_fsm == M2M_STATUS_BUF_NO &&
            ctl_fsm == M2M_STATUS_CTL_STALL) {
                /* Disable interrupts and the channel */
                control = readl(edmac->regs + M2M_CONTROL);
                control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
                            | M2M_CONTROL_ENABLE);
                writel(control, edmac->regs + M2M_CONTROL);
                return INTERRUPT_DONE;
        }

        /*
         * Nothing to do this time.
         */
        return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

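/*
 * Take the first acked descriptor off @edmac->free_list, reset it and
 * hand it out, or return %NULL if no reusable descriptor is available.
 */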
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *desc, *_desc;
        struct ep93xx_dma_desc *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del_init(&desc->node);

                        /* Re-initialize the descriptor */
                        desc->src_addr = 0;
                        desc->dst_addr = 0;
                        desc->size = 0;
                        desc->complete = false;
                        desc->txd.cookie = 0;
                        desc->txd.callback = NULL;
                        desc->txd.callback_param = NULL;

                        ret = desc;
                        break;
                }
        }
        spin_unlock_irqrestore(&edmac->lock, flags);
        return ret;
}

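/*
 * Return @desc, together with all descriptors chained on its tx_list,
 * back to the channel free list for reuse.
 */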
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
                                struct ep93xx_dma_desc *desc)
{
        if (desc) {
                unsigned long flags;

                spin_lock_irqsave(&edmac->lock, flags);
                list_splice_init(&desc->tx_list, &edmac->free_list);
                list_add(&desc->node, &edmac->free_list);
                spin_unlock_irqrestore(&edmac->lock, flags);
        }
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
        struct ep93xx_dma_desc *new;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
                spin_unlock_irqrestore(&edmac->lock, flags);
                return;
        }

        /* Take the next descriptor from the pending queue */
        new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
        list_del_init(&new->node);

        ep93xx_dma_set_active(edmac, new);

        /* Push it to the hardware */
        edmac->edma->hw_submit(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);
}

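/*
 * Per-channel tasklet: completes the finished descriptor chain, starts
 * the next pending transaction and only then invokes the client
 * callback, outside the channel lock.
 */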
static void ep93xx_dma_tasklet(unsigned long data)
{
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
        struct dmaengine_desc_callback cb;
        LIST_HEAD(list);

        memset(&cb, 0, sizeof(cb));
        spin_lock_irq(&edmac->lock);
        /*
         * If dma_terminate_all() was called before we get to run, the active
         * list has become empty. If that happens we aren't supposed to do
         * anything more than call ep93xx_dma_advance_work().
         */
        desc = ep93xx_dma_get_active(edmac);
        if (desc) {
                if (desc->complete) {
                        /* mark descriptor complete for non-cyclic case only */
                        if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                                dma_cookie_complete(&desc->txd);
                        list_splice_init(&edmac->active, &list);
                }
                dmaengine_desc_get_callback(&desc->txd, &cb);
        }
        spin_unlock_irq(&edmac->lock);

        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);

        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                dma_descriptor_unmap(&desc->txd);
                ep93xx_dma_desc_put(edmac, desc);
        }

        dmaengine_desc_callback_invoke(&cb, NULL);
}

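/*
 * Hard irq handler. The hardware specific hw_interrupt() hook
 * classifies the interrupt; actual completion work is deferred to the
 * tasklet.
 */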
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
        struct ep93xx_dma_chan *edmac = dev_id;
        struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;

        spin_lock(&edmac->lock);

        desc = ep93xx_dma_get_active(edmac);
        if (!desc) {
                dev_warn(chan2dev(edmac),
                         "got interrupt while active list is empty\n");
                spin_unlock(&edmac->lock);
                return IRQ_NONE;
        }

        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
                desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;

        case INTERRUPT_NEXT_BUFFER:
                if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                        tasklet_schedule(&edmac->tasklet);
                break;

        default:
                dev_warn(chan2dev(edmac), "unknown interrupt!\n");
                ret = IRQ_NONE;
                break;
        }

        spin_unlock(&edmac->lock);
        return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
        struct ep93xx_dma_desc *desc;
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        cookie = dma_cookie_assign(tx);

        desc = container_of(tx, struct ep93xx_dma_desc, txd);

        /*
         * If nothing is currently processed, we push this descriptor
         * directly to the hardware. Otherwise we put the descriptor
         * to the pending queue.
         */
        if (list_empty(&edmac->active)) {
                ep93xx_dma_set_active(edmac, desc);
                edmac->edma->hw_submit(edmac);
        } else {
                list_add_tail(&desc->node, &edmac->queue);
        }

        spin_unlock_irqrestore(&edmac->lock, flags);
        return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_data *data = chan->private;
        const char *name = dma_chan_name(chan);
        int ret, i;

        /* Sanity check the channel parameters */
        if (!edmac->edma->m2m) {
                if (!data)
                        return -EINVAL;
                if (data->port < EP93XX_DMA_I2S1 ||
                    data->port > EP93XX_DMA_IRDA)
                        return -EINVAL;
                if (data->direction != ep93xx_dma_chan_direction(chan))
                        return -EINVAL;
        } else {
                if (data) {
                        switch (data->port) {
                        case EP93XX_DMA_SSP:
                        case EP93XX_DMA_IDE:
                                if (!is_slave_direction(data->direction))
                                        return -EINVAL;
                                break;
                        default:
                                return -EINVAL;
                        }
                }
        }

        if (data && data->name)
                name = data->name;

        ret = clk_enable(edmac->clk);
        if (ret)
                return ret;

        ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
        if (ret)
                goto fail_clk_disable;

        spin_lock_irq(&edmac->lock);
        dma_cookie_init(&edmac->chan);
        ret = edmac->edma->hw_setup(edmac);
        spin_unlock_irq(&edmac->lock);

        if (ret)
                goto fail_free_irq;

        for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
                struct ep93xx_dma_desc *desc;

                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "not enough descriptors\n");
                        break;
                }

                INIT_LIST_HEAD(&desc->tx_list);

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = ep93xx_dma_tx_submit;

                ep93xx_dma_desc_put(edmac, desc);
        }

        return i;

fail_free_irq:
        free_irq(edmac->irq, edmac);
fail_clk_disable:
        clk_disable(edmac->clk);

        return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *d;
        unsigned long flags;
        LIST_HEAD(list);

        BUG_ON(!list_empty(&edmac->active));
        BUG_ON(!list_empty(&edmac->queue));

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->edma->hw_shutdown(edmac);
        edmac->runtime_addr = 0;
        edmac->runtime_ctrl = 0;
        edmac->buffer = 0;
        list_splice_init(&edmac->free_list, &list);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, d, &list, node)
                kfree(desc);

        clk_disable(edmac->clk);
        free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                           dma_addr_t src, size_t len, unsigned long flags)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t bytes, offset;

        first = NULL;
        for (offset = 0; offset < len; offset += bytes) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

                desc->src_addr = src + offset;
                desc->dst_addr = dest + offset;
                desc->size = bytes;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;
fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                         unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags, void *context)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        struct scatterlist *sg;
        int i;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        first = NULL;
        for_each_sg(sgl, sg, sg_len, i) {
                size_t len = sg_dma_len(sg);

                if (len > DMA_MAX_CHAN_BYTES) {
                        dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
                                 len);
                        goto fail;
                }

                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = sg_dma_address(sg);
                }
                desc->size = len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        first->txd.flags = flags;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                           size_t buf_len, size_t period_len,
                           enum dma_transfer_direction dir, unsigned long flags)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
        size_t offset = 0;

        if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
                dev_warn(chan2dev(edmac),
                         "channel was configured with different direction\n");
                return NULL;
        }

        if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
                dev_warn(chan2dev(edmac),
                         "channel is already used for cyclic transfers\n");
                return NULL;
        }

        if (period_len > DMA_MAX_CHAN_BYTES) {
                dev_warn(chan2dev(edmac), "too big period length %zu\n",
                         period_len);
                return NULL;
        }

        /* Split the buffer into period size chunks */
        first = NULL;
        for (offset = 0; offset < buf_len; offset += period_len) {
                desc = ep93xx_dma_desc_get(edmac);
                if (!desc) {
                        dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
                        goto fail;
                }

                if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
                        desc->src_addr = edmac->runtime_addr;
                        desc->dst_addr = dma_addr + offset;
                }

                desc->size = period_len;

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;

        return &first->txd;

fail:
        ep93xx_dma_desc_put(edmac, first);
        return NULL;
}

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

        if (edmac->edma->hw_synchronize)
                edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *_d;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&edmac->lock, flags);
        /* First we disable and flush the DMA channel */
        edmac->edma->hw_shutdown(edmac);
        clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
        list_splice_init(&edmac->active, &list);
        list_splice_init(&edmac->queue, &list);
        /*
         * We then re-enable the channel. This way we can continue submitting
         * the descriptors by just calling ->hw_submit() again.
         */
        edmac->edma->hw_setup(edmac);
        spin_unlock_irqrestore(&edmac->lock, flags);

        list_for_each_entry_safe(desc, _d, &list, node)
                ep93xx_dma_desc_put(edmac, desc);

        return 0;
}

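/*
 * Store the runtime slave configuration (peripheral address and bus
 * width) for an M2M channel. The address is picked up when slave
 * descriptors are prepared and the width bits are OR'd into the control
 * word in m2m_hw_submit().
 */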
static int ep93xx_dma_slave_config(struct dma_chan *chan,
                                   struct dma_slave_config *config)
{
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        enum dma_slave_buswidth width;
        unsigned long flags;
        u32 addr, ctrl;

        if (!edmac->edma->m2m)
                return -EINVAL;

        switch (config->direction) {
        case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;

        case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;

        default:
                return -EINVAL;
        }

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                ctrl = 0;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                ctrl = M2M_CONTROL_PW_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                ctrl = M2M_CONTROL_PW_32;
                break;
        default:
                return -EINVAL;
        }

        spin_lock_irqsave(&edmac->lock, flags);
        edmac->runtime_addr = addr;
        edmac->runtime_ctrl = ctrl;
        spin_unlock_irqrestore(&edmac->lock, flags);

        return 0;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *state)
{
        return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
        ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

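/*
 * Allocate the engine plus its per-channel state from platform data,
 * fill in the dmaengine capabilities and the hw_*() hooks (M2M vs. M2P
 * is encoded in the platform device id) and register the DMA device.
 */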
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
        struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct ep93xx_dma_engine *edma;
        struct dma_device *dma_dev;
        size_t edma_size;
        int ret, i;

        edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
        edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
        if (!edma)
                return -ENOMEM;

        dma_dev = &edma->dma_dev;
        edma->m2m = platform_get_device_id(pdev)->driver_data;
        edma->num_channels = pdata->num_channels;

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < pdata->num_channels; i++) {
                const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
                struct ep93xx_dma_chan *edmac = &edma->channels[i];

                edmac->chan.device = dma_dev;
                edmac->regs = cdata->base;
                edmac->irq = cdata->irq;
                edmac->edma = edma;

                edmac->clk = clk_get(NULL, cdata->name);
                if (IS_ERR(edmac->clk)) {
                        dev_warn(&pdev->dev, "failed to get clock for %s\n",
                                 cdata->name);
                        continue;
                }

                spin_lock_init(&edmac->lock);
                INIT_LIST_HEAD(&edmac->active);
                INIT_LIST_HEAD(&edmac->queue);
                INIT_LIST_HEAD(&edmac->free_list);
                tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
                             (unsigned long)edmac);

                list_add_tail(&edmac->chan.device_node,
                              &dma_dev->channels);
        }

        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

        dma_dev->dev = &pdev->dev;
        dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
        dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
        dma_dev->device_config = ep93xx_dma_slave_config;
        dma_dev->device_synchronize = ep93xx_dma_synchronize;
        dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
        dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
        dma_dev->device_tx_status = ep93xx_dma_tx_status;

        dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

        if (edma->m2m) {
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
                dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

                edma->hw_setup = m2m_hw_setup;
                edma->hw_shutdown = m2m_hw_shutdown;
                edma->hw_submit = m2m_hw_submit;
                edma->hw_interrupt = m2m_hw_interrupt;
        } else {
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

                edma->hw_synchronize = m2p_hw_synchronize;
                edma->hw_setup = m2p_hw_setup;
                edma->hw_shutdown = m2p_hw_shutdown;
                edma->hw_submit = m2p_hw_submit;
                edma->hw_interrupt = m2p_hw_interrupt;
        }

        ret = dma_async_device_register(dma_dev);
        if (unlikely(ret)) {
                for (i = 0; i < edma->num_channels; i++) {
                        struct ep93xx_dma_chan *edmac = &edma->channels[i];
                        if (!IS_ERR_OR_NULL(edmac->clk))
                                clk_put(edmac->clk);
                }
                kfree(edma);
        } else {
                dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
                         edma->m2m ? "M" : "P");
        }

        return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
        { "ep93xx-dma-m2p", 0 },
        { "ep93xx-dma-m2m", 1 },
        { },
};

static struct platform_driver ep93xx_dma_driver = {
        .driver         = {
                .name   = "ep93xx-dma",
        },
        .id_table       = ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
        return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");