linux/drivers/dma/sirf-dma.c
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_VER_A7V1                    1
#define SIRFSOC_DMA_VER_A7V2                    2
#define SIRFSOC_DMA_VER_A6                      4

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16
#define SIRFSOC_DMA_TABLE_NUM                   256

#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7                0x10
#define SIRFSOC_DMA_VALID_ATLAS7                0x14
#define SIRFSOC_DMA_INT_ATLAS7                  0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7               0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7            0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR               0x34
#define SIRFSOC_DMA_MUL_ATLAS7                  0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7         0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7     0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN                0x800
#define SIRFSOC_DMA_EARLY_RESP_SET              0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR              0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7        2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7       3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7         4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7              7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7        5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7     25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT            32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7         BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7          BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7          BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7         BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7          BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7          BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7              0x3F

/* the xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN                    4
#define SIRFSOC_DMA_XLEN_MAX_V1         0x800
#define SIRFSOC_DMA_XLEN_MAX_V2         0x1000

struct sirfsoc_dma_desc {
        struct dma_async_tx_descriptor  desc;
        struct list_head                node;

        /* SiRFprimaII 2D-DMA parameters */

        int             xlen;           /* DMA xlen */
        int             ylen;           /* DMA ylen */
        int             width;          /* DMA width */
        int             dir;
        bool            cyclic;         /* is loop DMA? */
        bool            chain;          /* is chain DMA? */
        u32             addr;           /* DMA buffer address */
        u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
};

struct sirfsoc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        unsigned long                   happened_cyclic;
        unsigned long                   completed_cyclic;

        /* Lock for this structure */
        spinlock_t                      lock;

        int                             mode;
};

struct sirfsoc_dma_regs {
        u32                             ctrl[SIRFSOC_DMA_CHANNELS];
        u32                             interrupt_en;
};

struct sirfsoc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
        struct sirfsoc_dma_chan         channels[SIRFSOC_DMA_CHANNELS];
        void __iomem                    *base;
        int                             irq;
        struct clk                      *clk;
        int                             type;
        void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
                int cid, int burst_mode, void __iomem *base);
        struct sirfsoc_dma_regs         regs_save;
};

struct sirfsoc_dmadata {
        void (*exec)(struct sirfsoc_dma_desc *sdesc,
                int cid, int burst_mode, void __iomem *base);
        int type;
};

enum sirfsoc_dma_chain_flag {
        SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
        SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
        SIRFSOC_DMA_CHAIN_LOOP = 0x03,
        SIRFSOC_DMA_CHAIN_END = 0x04
};

#define DRV_NAME        "sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
        return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

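/*
 * Sketch of the Atlas7 (A7v2) CH_CTRL word, inferred only from the *_ATLAS7
 * bit defines above; the exact field widths are an assumption, not taken
 * from a datasheet:
 *
 *   bits 0-1: start/enable bits (the literal 0x3 written below)
 *   bit  2  : burst mode        (SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7)
 *   bit  3  : chain mode        (SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7)
 *   bit  4  : direction         (SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7)
 *   bits 7+ : chain table count (SIRFSOC_DMA_TAB_NUM_ATLAS7)
 */
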
static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
                int cid, int burst_mode, void __iomem *base)
{
        if (sdesc->chain) {
                /* DMA v2 HW chain mode */
                writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
                               (sdesc->chain <<
                                SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
                               (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
                               base + SIRFSOC_DMA_CH_CTRL);
        } else {
                /* DMA v2 legacy mode */
                writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
                writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
                writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
                writel_relaxed((sdesc->width * ((sdesc->ylen + 1) >> 1)),
                                base + SIRFSOC_DMA_MUL_ATLAS7);
                writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
                               (sdesc->chain <<
                                SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
                               0x3, base + SIRFSOC_DMA_CH_CTRL);
        }
        writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
                       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
                        SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
                       base + SIRFSOC_DMA_INT_EN_ATLAS7);
        writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic)
                writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
                int cid, int burst_mode, void __iomem *base)
{
        writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
        writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
        writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
        writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
                       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
                       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
        writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
        writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
        writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
                       (1 << cid), base + SIRFSOC_DMA_INT_EN);
        writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic) {
                writel((1 << cid) | 1 << (cid + 16) |
                       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
                       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
        }
}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
                int cid, int burst_mode, void __iomem *base)
{
        writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
        writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
                       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
                       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
        writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
        writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
        writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
                       (1 << cid), base + SIRFSOC_DMA_INT_EN);
        writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic) {
                writel((1 << cid) | 1 << (cid + 16) |
                       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
                       base + SIRFSOC_DMA_CH_LOOP_CTRL);
        }
}

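/*
 * Note on register banking, inferred from the three helpers above: the A6
 * and A7v1 controllers expose a 0x10-byte register bank per channel (hence
 * the "cid * 0x10" offsets), while the A7v2 block has a single channel
 * bank, which is why its callers force cid to 0.
 */
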
/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        struct sirfsoc_dma_desc *sdesc = NULL;
        void __iomem *base;

        /*
         * The channel lock is already held by the callers of this
         * function, so do not take it again here.
         */
        base = sdma->base;
        sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
                                 node);
        /* Move the first queued descriptor to the active list */
        list_move_tail(&sdesc->node, &schan->active);

        if (sdma->type == SIRFSOC_DMA_VER_A7V2)
                cid = 0;

        /* Start the DMA transfer */
        sdma->exec_desc(sdesc, cid, schan->mode, base);

        if (sdesc->cyclic)
                schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
        struct sirfsoc_dma *sdma = data;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc = NULL;
        u32 is;
        bool chain;
        int ch;
        void __iomem *reg;

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A6:
        case SIRFSOC_DMA_VER_A7V1:
                is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
                reg = sdma->base + SIRFSOC_DMA_CH_INT;
                while ((ch = fls(is) - 1) >= 0) {
                        is &= ~(1 << ch);
                        writel_relaxed(1 << ch, reg);
                        schan = &sdma->channels[ch];
                        spin_lock(&schan->lock);
                        sdesc = list_first_entry(&schan->active,
                                                 struct sirfsoc_dma_desc, node);
                        if (!sdesc->cyclic) {
                                /* Execute queued descriptors */
                                list_splice_tail_init(&schan->active,
                                                      &schan->completed);
                                dma_cookie_complete(&sdesc->desc);
                                if (!list_empty(&schan->queued))
                                        sirfsoc_dma_execute(schan);
                        } else
                                schan->happened_cyclic++;
                        spin_unlock(&schan->lock);
                }
                break;

        case SIRFSOC_DMA_VER_A7V2:
                is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

                reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
                writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
                schan = &sdma->channels[0];
                spin_lock(&schan->lock);
                sdesc = list_first_entry(&schan->active,
                                         struct sirfsoc_dma_desc, node);
                if (!sdesc->cyclic) {
                        chain = sdesc->chain;
                        if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
                                (!chain &&
                                (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
                                /* Execute queued descriptors */
                                list_splice_tail_init(&schan->active,
                                                      &schan->completed);
                                dma_cookie_complete(&sdesc->desc);
                                if (!list_empty(&schan->queued))
                                        sirfsoc_dma_execute(schan);
                        }
                } else if (sdesc->cyclic && (is &
                                        SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
                        schan->happened_cyclic++;

                spin_unlock(&schan->lock);
                break;

        default:
                break;
        }

        /* Schedule tasklet */
        tasklet_schedule(&sdma->tasklet);

        return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
        dma_cookie_t last_cookie = 0;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        unsigned long happened_cyclic;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < sdma->dma.chancnt; i++) {
                schan = &sdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&schan->lock, flags);
                if (!list_empty(&schan->completed)) {
                        list_splice_tail_init(&schan->completed, &list);
                        spin_unlock_irqrestore(&schan->lock, flags);

                        /* Execute callbacks and run dependencies */
                        list_for_each_entry(sdesc, &list, node) {
                                desc = &sdesc->desc;

                                dmaengine_desc_get_callback_invoke(desc, NULL);
                                last_cookie = desc->cookie;
                                dma_run_dependencies(desc);
                        }

                        /* Free descriptors */
                        spin_lock_irqsave(&schan->lock, flags);
                        list_splice_tail_init(&list, &schan->free);
                        schan->chan.completed_cookie = last_cookie;
                        spin_unlock_irqrestore(&schan->lock, flags);
                } else {
                        if (list_empty(&schan->active)) {
                                spin_unlock_irqrestore(&schan->lock, flags);
                                continue;
                        }

                        /* for a cyclic channel, the desc always stays on
                         * the active list */
                        sdesc = list_first_entry(&schan->active,
                                struct sirfsoc_dma_desc, node);

                        /* cyclic DMA */
                        happened_cyclic = schan->happened_cyclic;
                        spin_unlock_irqrestore(&schan->lock, flags);

                        desc = &sdesc->desc;
                        while (happened_cyclic != schan->completed_cyclic) {
                                dmaengine_desc_get_callback_invoke(desc, NULL);
                                schan->completed_cyclic++;
                        }
                }
        }
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
        struct sirfsoc_dma *sdma = (void *)data;

        sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

        spin_lock_irqsave(&schan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&sdesc->node, &schan->queued);

        cookie = dma_cookie_assign(txd);

        spin_unlock_irqrestore(&schan->lock, flags);

        return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
                                    struct dma_slave_config *config)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;

        if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
                (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
                return -EINVAL;

        spin_lock_irqsave(&schan->lock, flags);
        schan->mode = (config->src_maxburst == 4 ? 1 : 0);
        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

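/*
 * Usage sketch (illustrative only, not part of this driver): per the checks
 * above a client must use a 4-byte bus width, and src_maxburst == 4 selects
 * burst mode (schan->mode = 1). The values below are hypothetical.
 *
 *      struct dma_slave_config cfg = {
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .src_maxburst   = 4,    (selects burst mode)
 *      };
 *      ret = dmaengine_slave_config(chan, &cfg);
 */
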
static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base +
                               SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
                writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
                               sdma->base + SIRFSOC_DMA_INT_ATLAS7);
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
                               ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) &
                               ~((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
                break;
        default:
                break;
        }

        list_splice_tail_init(&schan->active, &schan->free);
        list_splice_tail_init(&schan->queued, &schan->free);

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base +
                               SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) &
                               ~((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                break;

        default:
                break;
        }

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);
        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0x10001,
                               sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) |
                               ((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                break;

        default:
                break;
        }

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        pm_runtime_get_sync(sdma->dma.dev);

        /* Alloc descriptors for this channel */
        for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
                sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
                if (!sdesc) {
                        dev_notice(sdma->dma.dev, "Memory allocation error. "
                                "Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&sdesc->desc, chan);
                sdesc->desc.flags = DMA_CTRL_ACK;
                sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

                list_add_tail(&sdesc->node, &descs);
        }

        /*
         * Return an error only if no descriptors were allocated; drop the
         * runtime PM reference taken above so the usage count stays
         * balanced on failure.
         */
        if (i == 0) {
                pm_runtime_put(sdma->dma.dev);
                return -ENOMEM;
        }

        spin_lock_irqsave(&schan->lock, flags);

        list_splice_tail_init(&descs, &schan->free);
        spin_unlock_irqrestore(&schan->lock, flags);

        return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_desc *sdesc, *tmp;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&schan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&schan->prepared));
        BUG_ON(!list_empty(&schan->queued));
        BUG_ON(!list_empty(&schan->active));
        BUG_ON(!list_empty(&schan->completed));

        /* Move data */
        list_splice_tail_init(&schan->free, &descs);

        spin_unlock_irqrestore(&schan->lock, flags);

        /* Free descriptors */
        list_for_each_entry_safe(sdesc, tmp, &descs, node)
                kfree(sdesc);

        pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (list_empty(&schan->active) && !list_empty(&schan->queued))
                sirfsoc_dma_execute(schan);

        spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        struct dma_tx_state *txstate)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;
        enum dma_status ret;
        struct sirfsoc_dma_desc *sdesc;
        int cid = schan->chan.chan_id;
        unsigned long dma_pos;
        unsigned long dma_request_bytes;
        unsigned long residue;

        spin_lock_irqsave(&schan->lock, flags);

        if (list_empty(&schan->active)) {
                ret = dma_cookie_status(chan, cookie, txstate);
                dma_set_residue(txstate, 0);
                spin_unlock_irqrestore(&schan->lock, flags);
                return ret;
        }
        sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
        if (sdesc->cyclic)
                dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
                        (sdesc->width * SIRFSOC_DMA_WORD_LEN);
        else
                dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

        ret = dma_cookie_status(chan, cookie, txstate);

        if (sdma->type == SIRFSOC_DMA_VER_A7V2)
                cid = 0;

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
        } else {
                dma_pos = readl_relaxed(
                        sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
        }

        residue = dma_request_bytes - (dma_pos - sdesc->addr);
        dma_set_residue(txstate, residue);

        spin_unlock_irqrestore(&schan->lock, flags);

        return ret;
}

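/*
 * Worked example of the residue math above (hypothetical numbers): for a
 * non-cyclic transfer with xlen = 0x200 words, dma_request_bytes is
 * 0x200 * 4 = 0x800 bytes. If the hardware position register reads
 * dma_pos = sdesc->addr + 0x300, the reported residue is
 * 0x800 - 0x300 = 0x500 bytes still to transfer.
 */
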
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;
        int ret;

        if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
                ret = -EINVAL;
                goto err_dir;
        }

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                        node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc) {
                /* try to free completed descriptors */
                sirfsoc_dma_process_completed(sdma);
                ret = 0;
                goto no_desc;
        }

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);

        /*
         * The number of chunks in a frame can only be 1 for prima2, and
         * ylen (the number of frames minus 1) must be at least 0.
         */
        if ((xt->frame_size == 1) && (xt->numf > 0)) {
                sdesc->cyclic = 0;
                sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
                sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
                                SIRFSOC_DMA_WORD_LEN;
                sdesc->ylen = xt->numf - 1;
                if (xt->dir == DMA_MEM_TO_DEV) {
                        sdesc->addr = xt->src_start;
                        sdesc->dir = 1;
                } else {
                        sdesc->addr = xt->dst_start;
                        sdesc->dir = 0;
                }

                list_add_tail(&sdesc->node, &schan->prepared);
        } else {
                pr_err("sirfsoc DMA Invalid xfer\n");
                ret = -EINVAL;
                goto err_xfer;
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
err_xfer:
        spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
        return ERR_PTR(ret);
}

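/*
 * Worked example of the field mapping above (hypothetical values): an
 * interleaved template with sgl[0].size = 64 bytes, sgl[0].icg = 16 bytes
 * and numf = 8 frames yields xlen = 64 / 4 = 16 words per line,
 * width = (64 + 16) / 4 = 20 words per line including the gap, and
 * ylen = 8 - 1 = 7.
 */
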
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
        size_t buf_len, size_t period_len,
        enum dma_transfer_direction direction, unsigned long flags)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;

        /*
         * We only support cyclic transfers with two periods.
         * If the X-length is set to 0, the controller runs in loop mode.
         * The DMA address keeps increasing until it reaches the end of a
         * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
         * then wraps back to the beginning of that area.
         * In loop mode, the DMA data region is divided into two parts, BUFA
         * and BUFB, and the controller generates two interrupts per loop:
         * one when the DMA address reaches the end of BUFA and one when it
         * reaches the end of BUFB.
         */
        if (buf_len != 2 * period_len)
                return ERR_PTR(-EINVAL);

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                        node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc)
                return NULL;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);
        sdesc->addr = addr;
        sdesc->cyclic = 1;
        sdesc->xlen = 0;
        sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
        sdesc->width = 1;
        list_add_tail(&sdesc->node, &schan->prepared);
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
}

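/*
 * Usage sketch (illustrative only): a client sets up a two-period cyclic
 * transfer, e.g. for audio, with buf_len exactly twice period_len as
 * required above. The buffer variables and callback are hypothetical.
 *
 *      desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *                                       buf_len / 2, DMA_MEM_TO_DEV, 0);
 *      if (desc) {
 *              desc->callback = my_period_done;   (hypothetical callback)
 *              cookie = dmaengine_submit(desc);
 *              dma_async_issue_pending(chan);
 *      }
 */
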
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
        /* go through unsigned long to avoid truncating the cookie on 64-bit */
        unsigned int ch_nr = (unsigned int)(unsigned long)chan_id;

        if (ch_nr == chan->chan_id +
                chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
                return true;

        return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

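/*
 * Usage sketch (illustrative only): a client can claim a specific channel
 * through the standard dmaengine filter mechanism; the global channel
 * number below is hypothetical.
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *                                 (void *)(unsigned long)12);
 */
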
#define SIRFSOC_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
{
        struct sirfsoc_dma *sdma = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request >= SIRFSOC_DMA_CHANNELS)
                return NULL;

        return dma_get_slave_channel(&sdma->channels[request].chan);
}

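/*
 * Device-tree usage sketch (illustrative only; the node names and channel
 * numbers are hypothetical): the xlate above consumes a single cell, the
 * channel number, so a client node would reference the controller as:
 *
 *      uart0: serial@b0060000 {
 *              ...
 *              dmas = <&dmac0 4>, <&dmac0 5>;
 *              dma-names = "rx", "tx";
 *      };
 */
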
static int sirfsoc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct sirfsoc_dma *sdma;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dmadata *data;
        struct resource res;
        ulong regs_start, regs_size;
        u32 id;
        int ret, i;

        sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;

        data = (struct sirfsoc_dmadata *)
                (of_match_device(op->dev.driver->of_match_table,
                                 &op->dev)->data);
        sdma->exec_desc = data->exec;
        sdma->type = data->type;

        if (of_property_read_u32(dn, "cell-index", &id)) {
                dev_err(dev, "Failed to get DMAC index\n");
                return -ENODEV;
        }

        sdma->irq = irq_of_parse_and_map(dn, 0);
        if (!sdma->irq) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        sdma->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(sdma->clk)) {
                dev_err(dev, "failed to get a clock.\n");
                return PTR_ERR(sdma->clk);
        }

        ret = of_address_to_resource(dn, 0, &res);
        if (ret) {
                dev_err(dev, "Error parsing memory region!\n");
                goto irq_dispose;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        sdma->base = devm_ioremap(dev, regs_start, regs_size);
        if (!sdma->base) {
                dev_err(dev, "Error mapping memory region!\n");
                ret = -ENOMEM;
                goto irq_dispose;
        }

        ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
        if (ret) {
                dev_err(dev, "Error requesting IRQ!\n");
                ret = -EINVAL;
                goto irq_dispose;
        }

        dma = &sdma->dma;
        dma->dev = dev;

        dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
        dma->device_issue_pending = sirfsoc_dma_issue_pending;
        dma->device_config = sirfsoc_dma_slave_config;
        dma->device_pause = sirfsoc_dma_pause_chan;
        dma->device_resume = sirfsoc_dma_resume_chan;
        dma->device_terminate_all = sirfsoc_dma_terminate_all;
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
        dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);

        for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
                schan = &sdma->channels[i];

                schan->chan.device = dma;
                dma_cookie_init(&schan->chan);

                INIT_LIST_HEAD(&schan->free);
                INIT_LIST_HEAD(&schan->prepared);
                INIT_LIST_HEAD(&schan->queued);
                INIT_LIST_HEAD(&schan->active);
                INIT_LIST_HEAD(&schan->completed);

                spin_lock_init(&schan->lock);
                list_add_tail(&schan->chan.device_node, &dma->channels);
        }

        tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

        /* Register DMA engine */
        dev_set_drvdata(dev, sdma);

        ret = dma_async_device_register(dma);
        if (ret)
                goto free_irq;

        /* Device-tree DMA controller registration */
        ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
        if (ret) {
                dev_err(dev, "failed to register DMA controller\n");
                goto unreg_dma_dev;
        }

        pm_runtime_enable(&op->dev);
        dev_info(dev, "initialized SIRFSOC DMAC driver\n");

        return 0;

unreg_dma_dev:
        dma_async_device_unregister(dma);
free_irq:
        free_irq(sdma->irq, sdma);
irq_dispose:
        irq_dispose_mapping(sdma->irq);
        return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        of_dma_controller_free(op->dev.of_node);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
        tasklet_kill(&sdma->tasklet);
        irq_dispose_mapping(sdma->irq);
        pm_runtime_disable(&op->dev);
        if (!pm_runtime_status_suspended(&op->dev))
                sirfsoc_dma_runtime_suspend(&op->dev);

        return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        clk_disable_unprepare(sdma->clk);
        return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(sdma->clk);
        if (ret < 0) {
                dev_err(dev, "clk_enable failed: %d\n", ret);
                return ret;
        }
        return 0;
}

static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;
        int count;
        u32 int_offset;

        /*
         * If we were runtime-suspended before, resume to enable the clock
         * before accessing the registers.
         */
        if (pm_runtime_status_suspended(dev)) {
                ret = sirfsoc_dma_runtime_resume(dev);
                if (ret < 0)
                        return ret;
        }

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                count = 1;
                int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
        } else {
                count = SIRFSOC_DMA_CHANNELS;
                int_offset = SIRFSOC_DMA_INT_EN;
        }

        /*
         * The DMA controller loses its register state across suspend, so
         * save the registers of all active channels here.
         */
        for (ch = 0; ch < count; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                save->ctrl[ch] = readl_relaxed(sdma->base +
                        ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
        }
        save->interrupt_en = readl_relaxed(sdma->base + int_offset);

        /* Disable clock */
        sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_desc *sdesc;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;
        int count;
        u32 int_offset;
        u32 width_offset;

        /* Enable the clock before accessing the registers */
        ret = sirfsoc_dma_runtime_resume(dev);
        if (ret < 0)
                return ret;

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                count = 1;
                int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
                width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
        } else {
                count = SIRFSOC_DMA_CHANNELS;
                int_offset = SIRFSOC_DMA_INT_EN;
                width_offset = SIRFSOC_DMA_WIDTH_0;
        }

        writel_relaxed(save->interrupt_en, sdma->base + int_offset);
        for (ch = 0; ch < count; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                sdesc = list_first_entry(&schan->active,
                        struct sirfsoc_dma_desc,
                        node);
                writel_relaxed(sdesc->width,
                        sdma->base + width_offset + ch * 4);
                writel_relaxed(sdesc->xlen,
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
                writel_relaxed(sdesc->ylen,
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
                writel_relaxed(save->ctrl[ch],
                        sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
                if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                        writel_relaxed(sdesc->addr,
                                sdma->base + SIRFSOC_DMA_CH_ADDR);
                } else {
                        writel_relaxed(sdesc->addr >> 2,
                                sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
                }
        }

        /* if we were runtime-suspended before, suspend again */
        if (pm_runtime_status_suspended(dev))
                sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
                           sirfsoc_dma_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
        .exec = sirfsoc_dma_execute_hw_a6,
        .type = SIRFSOC_DMA_VER_A6,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
        .exec = sirfsoc_dma_execute_hw_a7v1,
        .type = SIRFSOC_DMA_VER_A7V1,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
        .exec = sirfsoc_dma_execute_hw_a7v2,
        .type = SIRFSOC_DMA_VER_A7V2,
};

static const struct of_device_id sirfsoc_dma_match[] = {
        { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
        { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
        { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
        {},
};
MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);

static struct platform_driver sirfsoc_dma_driver = {
        .probe          = sirfsoc_dma_probe,
        .remove         = sirfsoc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .pm = &sirfsoc_dma_pm_ops,
                .of_match_table = sirfsoc_dma_match,
        },
};

static __init int sirfsoc_dma_init(void)
{
        return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
        platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");