linux/drivers/dma/pxa_dma.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
   4 */
   5
   6#include <linux/err.h>
   7#include <linux/module.h>
   8#include <linux/init.h>
   9#include <linux/types.h>
  10#include <linux/interrupt.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/slab.h>
  13#include <linux/dmaengine.h>
  14#include <linux/platform_device.h>
  15#include <linux/device.h>
  16#include <linux/platform_data/mmp_dma.h>
  17#include <linux/dmapool.h>
  18#include <linux/of_device.h>
  19#include <linux/of_dma.h>
  20#include <linux/of.h>
  21#include <linux/wait.h>
  22#include <linux/dma/pxa-dma.h>
  23
  24#include "dmaengine.h"
  25#include "virt-dma.h"
  26
  27#define DCSR(n)         (0x0000 + ((n) << 2))
  28#define DALGN(n)        0x00a0
  29#define DINT            0x00f0
  30#define DDADR(n)        (0x0200 + ((n) << 4))
  31#define DSADR(n)        (0x0204 + ((n) << 4))
  32#define DTADR(n)        (0x0208 + ((n) << 4))
  33#define DCMD(n)         (0x020c + ((n) << 4))
  34
  35#define PXA_DCSR_RUN            BIT(31) /* Run Bit (read / write) */
  36#define PXA_DCSR_NODESC         BIT(30) /* No-Descriptor Fetch (read / write) */
  37#define PXA_DCSR_STOPIRQEN      BIT(29) /* Stop Interrupt Enable (R/W) */
  38#define PXA_DCSR_REQPEND        BIT(8)  /* Request Pending (read-only) */
  39#define PXA_DCSR_STOPSTATE      BIT(3)  /* Stop State (read-only) */
  40#define PXA_DCSR_ENDINTR        BIT(2)  /* End Interrupt (read / write) */
  41#define PXA_DCSR_STARTINTR      BIT(1)  /* Start Interrupt (read / write) */
  42#define PXA_DCSR_BUSERR         BIT(0)  /* Bus Error Interrupt (read / write) */
  43
  44#define PXA_DCSR_EORIRQEN       BIT(28) /* End of Receive IRQ Enable (R/W) */
  45#define PXA_DCSR_EORJMPEN       BIT(27) /* Jump to next descriptor on EOR */
  46#define PXA_DCSR_EORSTOPEN      BIT(26) /* STOP on an EOR */
  47#define PXA_DCSR_SETCMPST       BIT(25) /* Set Descriptor Compare Status */
  48#define PXA_DCSR_CLRCMPST       BIT(24) /* Clear Descriptor Compare Status */
  49#define PXA_DCSR_CMPST          BIT(10) /* The Descriptor Compare Status */
  50#define PXA_DCSR_EORINTR        BIT(9)  /* The end of Receive */
  51
  52#define DRCMR_MAPVLD    BIT(7)  /* Map Valid (read / write) */
  53#define DRCMR_CHLNUM    0x1f    /* mask for Channel Number (read / write) */
  54
  55#define DDADR_DESCADDR  0xfffffff0      /* Address of next descriptor (mask) */
  56#define DDADR_STOP      BIT(0)  /* Stop (read / write) */
  57
  58#define PXA_DCMD_INCSRCADDR     BIT(31) /* Source Address Increment Setting. */
  59#define PXA_DCMD_INCTRGADDR     BIT(30) /* Target Address Increment Setting. */
  60#define PXA_DCMD_FLOWSRC        BIT(29) /* Flow Control by the source. */
  61#define PXA_DCMD_FLOWTRG        BIT(28) /* Flow Control by the target. */
  62#define PXA_DCMD_STARTIRQEN     BIT(22) /* Start Interrupt Enable */
  63#define PXA_DCMD_ENDIRQEN       BIT(21) /* End Interrupt Enable */
  64#define PXA_DCMD_ENDIAN         BIT(18) /* Device Endian-ness. */
  65#define PXA_DCMD_BURST8         (1 << 16)       /* 8 byte burst */
  66#define PXA_DCMD_BURST16        (2 << 16)       /* 16 byte burst */
  67#define PXA_DCMD_BURST32        (3 << 16)       /* 32 byte burst */
  68#define PXA_DCMD_WIDTH1         (1 << 14)       /* 1 byte width */
  69#define PXA_DCMD_WIDTH2         (2 << 14)       /* 2 byte width (HalfWord) */
  70#define PXA_DCMD_WIDTH4         (3 << 14)       /* 4 byte width (Word) */
  71#define PXA_DCMD_LENGTH         0x01fff         /* length mask (max = 8K - 1) */
  72
  73#define PDMA_ALIGNMENT          3
  74#define PDMA_MAX_DESC_BYTES     (PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
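/* i.e. 8191 rounded down to an 8-byte boundary: at most 8184 bytes per hw descriptor */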
  75
  76struct pxad_desc_hw {
  77        u32 ddadr;      /* Points to the next descriptor + flags */
  78        u32 dsadr;      /* DSADR value for the current transfer */
  79        u32 dtadr;      /* DTADR value for the current transfer */
  80        u32 dcmd;       /* DCMD value for the current transfer */
  81} __aligned(16);
  82
  83struct pxad_desc_sw {
  84        struct virt_dma_desc    vd;             /* Virtual descriptor */
  85        int                     nb_desc;        /* Number of hw. descriptors */
  86        size_t                  len;            /* Number of bytes xfered */
  87        dma_addr_t              first;          /* First descriptor's addr */
  88
   89        /* At least one descriptor has a src/dst address that is not a multiple of 8 */
  90        bool                    misaligned;
  91        bool                    cyclic;
  92        struct dma_pool         *desc_pool;     /* Channel's used allocator */
  93
  94        struct pxad_desc_hw     *hw_desc[];     /* DMA coherent descriptors */
  95};
  96
  97struct pxad_phy {
  98        int                     idx;
  99        void __iomem            *base;
 100        struct pxad_chan        *vchan;
 101};
 102
 103struct pxad_chan {
 104        struct virt_dma_chan    vc;             /* Virtual channel */
 105        u32                     drcmr;          /* Requestor of the channel */
 106        enum pxad_chan_prio     prio;           /* Required priority of phy */
 107        /*
  108         * At least one desc_sw in the submitted or issued transfers on this
  109         * channel has an address for which addr % 8 != 0. This requires the
  110         * DALGN bit to be set on the phy.
 111         */
 112        bool                    misaligned;
 113        struct dma_slave_config cfg;            /* Runtime config */
 114
 115        /* protected by vc->lock */
 116        struct pxad_phy         *phy;
 117        struct dma_pool         *desc_pool;     /* Descriptors pool */
 118        dma_cookie_t            bus_error;
 119
 120        wait_queue_head_t       wq_state;
 121};
 122
 123struct pxad_device {
 124        struct dma_device               slave;
 125        int                             nr_chans;
 126        int                             nr_requestors;
 127        void __iomem                    *base;
 128        struct pxad_phy                 *phys;
 129        spinlock_t                      phy_lock;       /* Phy association */
 130#ifdef CONFIG_DEBUG_FS
 131        struct dentry                   *dbgfs_root;
 132        struct dentry                   *dbgfs_state;
 133        struct dentry                   **dbgfs_chan;
 134#endif
 135};
 136
 137#define tx_to_pxad_desc(tx)                                     \
 138        container_of(tx, struct pxad_desc_sw, async_tx)
 139#define to_pxad_chan(dchan)                                     \
 140        container_of(dchan, struct pxad_chan, vc.chan)
 141#define to_pxad_dev(dmadev)                                     \
 142        container_of(dmadev, struct pxad_device, slave)
 143#define to_pxad_sw_desc(_vd)                            \
 144        container_of((_vd), struct pxad_desc_sw, vd)
 145
 146#define _phy_readl_relaxed(phy, _reg)                                   \
 147        readl_relaxed((phy)->base + _reg((phy)->idx))
 148#define phy_readl_relaxed(phy, _reg)                                    \
 149        ({                                                              \
 150                u32 _v;                                                 \
 151                _v = readl_relaxed((phy)->base + _reg((phy)->idx));     \
 152                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
 153                         "%s(): readl(%s): 0x%08x\n", __func__, #_reg,  \
 154                          _v);                                          \
 155                _v;                                                     \
 156        })
 157#define phy_writel(phy, val, _reg)                                      \
 158        do {                                                            \
 159                writel((val), (phy)->base + _reg((phy)->idx));          \
 160                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
 161                         "%s(): writel(0x%08x, %s)\n",                  \
 162                         __func__, (u32)(val), #_reg);                  \
 163        } while (0)
 164#define phy_writel_relaxed(phy, val, _reg)                              \
 165        do {                                                            \
 166                writel_relaxed((val), (phy)->base + _reg((phy)->idx));  \
 167                dev_vdbg(&phy->vchan->vc.chan.dev->device,              \
 168                         "%s(): writel_relaxed(0x%08x, %s)\n",          \
 169                         __func__, (u32)(val), #_reg);                  \
 170        } while (0)
 171
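/*
 * DRCMR offsets: requestor lines 0-63 have their mapping register at
 * 0x100 + line * 4, while lines 64 and above are mapped starting at
 * 0x1000 + line * 4.
 */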
 172static unsigned int pxad_drcmr(unsigned int line)
 173{
 174        if (line < 64)
 175                return 0x100 + line * 4;
 176        return 0x1000 + line * 4;
 177}
 178
 179/*
 180 * Debug fs
 181 */
 182#ifdef CONFIG_DEBUG_FS
 183#include <linux/debugfs.h>
 184#include <linux/uaccess.h>
 185#include <linux/seq_file.h>
 186
 187static int dbg_show_requester_chan(struct seq_file *s, void *p)
 188{
 189        struct pxad_phy *phy = s->private;
 190        int i;
 191        u32 drcmr;
 192
  193        seq_printf(s, "DMA channel %d requesters:\n", phy->idx);
 194        for (i = 0; i < 70; i++) {
 195                drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
 196                if ((drcmr & DRCMR_CHLNUM) == phy->idx)
 197                        seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
 198                                   !!(drcmr & DRCMR_MAPVLD));
 199        }
 200        return 0;
 201}
 202
 203static inline int dbg_burst_from_dcmd(u32 dcmd)
 204{
 205        int burst = (dcmd >> 16) & 0x3;
 206
 207        return burst ? 4 << burst : 0;
 208}
 209
 210static int is_phys_valid(unsigned long addr)
 211{
 212        return pfn_valid(__phys_to_pfn(addr));
 213}
 214
 215#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
 216#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")
 217
 218static int dbg_show_descriptors(struct seq_file *s, void *p)
 219{
 220        struct pxad_phy *phy = s->private;
 221        int i, max_show = 20, burst, width;
 222        u32 dcmd;
 223        unsigned long phys_desc, ddadr;
 224        struct pxad_desc_hw *desc;
 225
 226        phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);
 227
  228        seq_printf(s, "DMA channel %d descriptors:\n", phy->idx);
 229        seq_printf(s, "[%03d] First descriptor unknown\n", 0);
 230        for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
 231                desc = phys_to_virt(phys_desc);
 232                dcmd = desc->dcmd;
 233                burst = dbg_burst_from_dcmd(dcmd);
 234                width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
 235
 236                seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
 237                           i, phys_desc, desc);
 238                seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
 239                seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
 240                seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
 241                seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
 242                           dcmd,
 243                           PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
 244                           PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
 245                           PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
 246                           PXA_DCMD_STR(ENDIAN), burst, width,
 247                           dcmd & PXA_DCMD_LENGTH);
 248                phys_desc = desc->ddadr;
 249        }
 250        if (i == max_show)
 251                seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
 252                           i, phys_desc);
 253        else
 254                seq_printf(s, "[%03d] Desc at %08lx is %s\n",
 255                           i, phys_desc, phys_desc == DDADR_STOP ?
 256                           "DDADR_STOP" : "invalid");
 257
 258        return 0;
 259}
 260
 261static int dbg_show_chan_state(struct seq_file *s, void *p)
 262{
 263        struct pxad_phy *phy = s->private;
 264        u32 dcsr, dcmd;
 265        int burst, width;
 266        static const char * const str_prio[] = {
 267                "high", "normal", "low", "invalid"
 268        };
 269
 270        dcsr = _phy_readl_relaxed(phy, DCSR);
 271        dcmd = _phy_readl_relaxed(phy, DCMD);
 272        burst = dbg_burst_from_dcmd(dcmd);
 273        width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
 274
 275        seq_printf(s, "DMA channel %d\n", phy->idx);
 276        seq_printf(s, "\tPriority : %s\n",
 277                          str_prio[(phy->idx & 0xf) / 4]);
 278        seq_printf(s, "\tUnaligned transfer bit: %s\n",
 279                          _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
 280                          "yes" : "no");
 281        seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
 282                   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
 283                   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
 284                   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
 285                   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
 286                   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
 287                   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
 288                   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
 289                   PXA_DCSR_STR(BUSERR));
 290
 291        seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
 292                   dcmd,
 293                   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
 294                   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
 295                   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
 296                   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
 297        seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
 298        seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
 299        seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));
 300
 301        return 0;
 302}
 303
 304static int dbg_show_state(struct seq_file *s, void *p)
 305{
 306        struct pxad_device *pdev = s->private;
 307
 308        /* basic device status */
 309        seq_puts(s, "DMA engine status\n");
 310        seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);
 311
 312        return 0;
 313}
 314
 315#define DBGFS_FUNC_DECL(name) \
 316static int dbg_open_##name(struct inode *inode, struct file *file) \
 317{ \
 318        return single_open(file, dbg_show_##name, inode->i_private); \
 319} \
 320static const struct file_operations dbg_fops_##name = { \
 321        .open           = dbg_open_##name, \
 322        .llseek         = seq_lseek, \
 323        .read           = seq_read, \
 324        .release        = single_release, \
 325}
 326
 327DBGFS_FUNC_DECL(state);
 328DBGFS_FUNC_DECL(chan_state);
 329DBGFS_FUNC_DECL(descriptors);
 330DBGFS_FUNC_DECL(requester_chan);
 331
 332static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
 333                                             int ch, struct dentry *chandir)
 334{
 335        char chan_name[11];
 336        struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
 337        struct dentry *chan_reqs = NULL;
 338        void *dt;
 339
 340        scnprintf(chan_name, sizeof(chan_name), "%d", ch);
 341        chan = debugfs_create_dir(chan_name, chandir);
 342        dt = (void *)&pdev->phys[ch];
 343
 344        if (chan)
 345                chan_state = debugfs_create_file("state", 0400, chan, dt,
 346                                                 &dbg_fops_chan_state);
 347        if (chan_state)
 348                chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
 349                                                 &dbg_fops_descriptors);
 350        if (chan_descr)
 351                chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
 352                                                &dbg_fops_requester_chan);
 353        if (!chan_reqs)
 354                goto err_state;
 355
 356        return chan;
 357
 358err_state:
 359        debugfs_remove_recursive(chan);
 360        return NULL;
 361}
 362
 363static void pxad_init_debugfs(struct pxad_device *pdev)
 364{
 365        int i;
 366        struct dentry *chandir;
 367
 368        pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
 369        if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
 370                goto err_root;
 371
 372        pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
 373                                                pdev, &dbg_fops_state);
 374        if (!pdev->dbgfs_state)
 375                goto err_state;
 376
 377        pdev->dbgfs_chan =
  378                kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_chan),
 379                              GFP_KERNEL);
 380        if (!pdev->dbgfs_chan)
 381                goto err_alloc;
 382
 383        chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
 384        if (!chandir)
 385                goto err_chandir;
 386
 387        for (i = 0; i < pdev->nr_chans; i++) {
 388                pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
 389                if (!pdev->dbgfs_chan[i])
 390                        goto err_chans;
 391        }
 392
 393        return;
 394err_chans:
 395err_chandir:
 396        kfree(pdev->dbgfs_chan);
 397err_alloc:
 398err_state:
 399        debugfs_remove_recursive(pdev->dbgfs_root);
 400err_root:
 401        pr_err("pxad: debugfs is not available\n");
 402}
 403
 404static void pxad_cleanup_debugfs(struct pxad_device *pdev)
 405{
 406        debugfs_remove_recursive(pdev->dbgfs_root);
 407}
 408#else
 409static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
 410static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
 411#endif
 412
 413static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
 414{
 415        int prio, i;
 416        struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
 417        struct pxad_phy *phy, *found = NULL;
 418        unsigned long flags;
 419
 420        /*
 421         * dma channel priorities
 422         * ch 0 - 3,  16 - 19  <--> (0)
 423         * ch 4 - 7,  20 - 23  <--> (1)
 424         * ch 8 - 11, 24 - 27  <--> (2)
 425         * ch 12 - 15, 28 - 31  <--> (3)
 426         */
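        /* i.e. prio(ch) = (ch & 0xf) >> 2, which the loop below relies on */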
 427
 428        spin_lock_irqsave(&pdev->phy_lock, flags);
 429        for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
 430                for (i = 0; i < pdev->nr_chans; i++) {
 431                        if (prio != (i & 0xf) >> 2)
 432                                continue;
 433                        phy = &pdev->phys[i];
 434                        if (!phy->vchan) {
 435                                phy->vchan = pchan;
 436                                found = phy;
 437                                goto out_unlock;
 438                        }
 439                }
 440        }
 441
 442out_unlock:
 443        spin_unlock_irqrestore(&pdev->phy_lock, flags);
 444        dev_dbg(&pchan->vc.chan.dev->device,
 445                "%s(): phy=%p(%d)\n", __func__, found,
 446                found ? found->idx : -1);
 447
 448        return found;
 449}
 450
 451static void pxad_free_phy(struct pxad_chan *chan)
 452{
 453        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
 454        unsigned long flags;
 455        u32 reg;
 456
 457        dev_dbg(&chan->vc.chan.dev->device,
 458                "%s(): freeing\n", __func__);
 459        if (!chan->phy)
 460                return;
 461
 462        /* clear the channel mapping in DRCMR */
 463        if (chan->drcmr <= pdev->nr_requestors) {
 464                reg = pxad_drcmr(chan->drcmr);
 465                writel_relaxed(0, chan->phy->base + reg);
 466        }
 467
 468        spin_lock_irqsave(&pdev->phy_lock, flags);
 469        chan->phy->vchan = NULL;
 470        chan->phy = NULL;
 471        spin_unlock_irqrestore(&pdev->phy_lock, flags);
 472}
 473
 474static bool is_chan_running(struct pxad_chan *chan)
 475{
 476        u32 dcsr;
 477        struct pxad_phy *phy = chan->phy;
 478
 479        if (!phy)
 480                return false;
 481        dcsr = phy_readl_relaxed(phy, DCSR);
 482        return dcsr & PXA_DCSR_RUN;
 483}
 484
 485static bool is_running_chan_misaligned(struct pxad_chan *chan)
 486{
 487        u32 dalgn;
 488
 489        BUG_ON(!chan->phy);
 490        dalgn = phy_readl_relaxed(chan->phy, DALGN);
 491        return dalgn & (BIT(chan->phy->idx));
 492}
 493
 494static void phy_enable(struct pxad_phy *phy, bool misaligned)
 495{
 496        struct pxad_device *pdev;
 497        u32 reg, dalgn;
 498
 499        if (!phy->vchan)
 500                return;
 501
 502        dev_dbg(&phy->vchan->vc.chan.dev->device,
 503                "%s(); phy=%p(%d) misaligned=%d\n", __func__,
 504                phy, phy->idx, misaligned);
 505
 506        pdev = to_pxad_dev(phy->vchan->vc.chan.device);
 507        if (phy->vchan->drcmr <= pdev->nr_requestors) {
 508                reg = pxad_drcmr(phy->vchan->drcmr);
 509                writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 510        }
 511
 512        dalgn = phy_readl_relaxed(phy, DALGN);
 513        if (misaligned)
 514                dalgn |= BIT(phy->idx);
 515        else
 516                dalgn &= ~BIT(phy->idx);
 517        phy_writel_relaxed(phy, dalgn, DALGN);
 518
 519        phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
 520                   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
 521}
 522
 523static void phy_disable(struct pxad_phy *phy)
 524{
 525        u32 dcsr;
 526
 527        if (!phy)
 528                return;
 529
 530        dcsr = phy_readl_relaxed(phy, DCSR);
 531        dev_dbg(&phy->vchan->vc.chan.dev->device,
 532                "%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
 533        phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
 534}
 535
 536static void pxad_launch_chan(struct pxad_chan *chan,
 537                                 struct pxad_desc_sw *desc)
 538{
 539        dev_dbg(&chan->vc.chan.dev->device,
 540                "%s(): desc=%p\n", __func__, desc);
 541        if (!chan->phy) {
 542                chan->phy = lookup_phy(chan);
 543                if (!chan->phy) {
 544                        dev_dbg(&chan->vc.chan.dev->device,
 545                                "%s(): no free dma channel\n", __func__);
 546                        return;
 547                }
 548        }
 549        chan->bus_error = 0;
 550
 551        /*
 552         * Program the descriptor's address into the DMA controller,
 553         * then start the DMA transaction
 554         */
 555        phy_writel(chan->phy, desc->first, DDADR);
 556        phy_enable(chan->phy, chan->misaligned);
 557        wake_up(&chan->wq_state);
 558}
 559
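/*
 * Completion tracking: every transfer ends with an extra "updater" hw
 * descriptor whose source is the updater's own bus address and whose target
 * is that address + 8. When the channel executes it, it copies the updater's
 * own ddadr word (DDADR_STOP, or the address of a hot-chained follow-up
 * transfer) over the updater's dtadr field, so is_desc_completed() can tell
 * from memory alone that the transfer finished: dtadr no longer equals
 * dsadr + 8. For cyclic transfers the chain loops back to the first
 * descriptor instead and the updater is never reached.
 */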
 560static void set_updater_desc(struct pxad_desc_sw *sw_desc,
 561                             unsigned long flags)
 562{
 563        struct pxad_desc_hw *updater =
 564                sw_desc->hw_desc[sw_desc->nb_desc - 1];
 565        dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;
 566
 567        updater->ddadr = DDADR_STOP;
 568        updater->dsadr = dma;
 569        updater->dtadr = dma + 8;
 570        updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
 571                (PXA_DCMD_LENGTH & sizeof(u32));
 572        if (flags & DMA_PREP_INTERRUPT)
 573                updater->dcmd |= PXA_DCMD_ENDIRQEN;
 574        if (sw_desc->cyclic)
 575                sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
 576}
 577
 578static bool is_desc_completed(struct virt_dma_desc *vd)
 579{
 580        struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
 581        struct pxad_desc_hw *updater =
 582                sw_desc->hw_desc[sw_desc->nb_desc - 1];
 583
 584        return updater->dtadr != (updater->dsadr + 8);
 585}
 586
 587static void pxad_desc_chain(struct virt_dma_desc *vd1,
 588                                struct virt_dma_desc *vd2)
 589{
 590        struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
 591        struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
 592        dma_addr_t dma_to_chain;
 593
 594        dma_to_chain = desc2->first;
 595        desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
 596}
 597
 598static bool pxad_try_hotchain(struct virt_dma_chan *vc,
 599                                  struct virt_dma_desc *vd)
 600{
 601        struct virt_dma_desc *vd_last_issued = NULL;
 602        struct pxad_chan *chan = to_pxad_chan(&vc->chan);
 603
 604        /*
 605         * Attempt to hot chain the tx if the phy is still running. This is
 606         * considered successful only if either the channel is still running
 607         * after the chaining, or if the chained transfer is completed after
 608         * having been hot chained.
 609         * A change of alignment is not allowed, and forbids hotchaining.
 610         */
 611        if (is_chan_running(chan)) {
 612                BUG_ON(list_empty(&vc->desc_issued));
 613
 614                if (!is_running_chan_misaligned(chan) &&
 615                    to_pxad_sw_desc(vd)->misaligned)
 616                        return false;
 617
 618                vd_last_issued = list_entry(vc->desc_issued.prev,
 619                                            struct virt_dma_desc, node);
 620                pxad_desc_chain(vd_last_issued, vd);
 621                if (is_chan_running(chan) || is_desc_completed(vd))
 622                        return true;
 623        }
 624
 625        return false;
 626}
 627
 628static unsigned int clear_chan_irq(struct pxad_phy *phy)
 629{
 630        u32 dcsr;
 631        u32 dint = readl(phy->base + DINT);
 632
 633        if (!(dint & BIT(phy->idx)))
 634                return PXA_DCSR_RUN;
 635
 636        /* clear irq */
 637        dcsr = phy_readl_relaxed(phy, DCSR);
 638        phy_writel(phy, dcsr, DCSR);
 639        if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
 640                dev_warn(&phy->vchan->vc.chan.dev->device,
 641                         "%s(chan=%p): PXA_DCSR_BUSERR\n",
  642                         __func__, phy->vchan);
 643
 644        return dcsr & ~PXA_DCSR_RUN;
 645}
 646
 647static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 648{
 649        struct pxad_phy *phy = dev_id;
 650        struct pxad_chan *chan = phy->vchan;
 651        struct virt_dma_desc *vd, *tmp;
 652        unsigned int dcsr;
 653        unsigned long flags;
 654        bool vd_completed;
 655        dma_cookie_t last_started = 0;
 656
 657        BUG_ON(!chan);
 658
 659        dcsr = clear_chan_irq(phy);
 660        if (dcsr & PXA_DCSR_RUN)
 661                return IRQ_NONE;
 662
 663        spin_lock_irqsave(&chan->vc.lock, flags);
 664        list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
 665                vd_completed = is_desc_completed(vd);
 666                dev_dbg(&chan->vc.chan.dev->device,
 667                        "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
 668                        __func__, vd, vd->tx.cookie, vd_completed,
 669                        dcsr);
 670                last_started = vd->tx.cookie;
 671                if (to_pxad_sw_desc(vd)->cyclic) {
 672                        vchan_cyclic_callback(vd);
 673                        break;
 674                }
 675                if (vd_completed) {
 676                        list_del(&vd->node);
 677                        vchan_cookie_complete(vd);
 678                } else {
 679                        break;
 680                }
 681        }
 682
 683        if (dcsr & PXA_DCSR_BUSERR) {
 684                chan->bus_error = last_started;
 685                phy_disable(phy);
 686        }
 687
 688        if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
 689                dev_dbg(&chan->vc.chan.dev->device,
  690                        "%s(): channel stopped, submitted_empty=%d issued_empty=%d\n",
 691                        __func__,
 692                        list_empty(&chan->vc.desc_submitted),
 693                        list_empty(&chan->vc.desc_issued));
 694                phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);
 695
 696                if (list_empty(&chan->vc.desc_issued)) {
 697                        chan->misaligned =
 698                                !list_empty(&chan->vc.desc_submitted);
 699                } else {
 700                        vd = list_first_entry(&chan->vc.desc_issued,
 701                                              struct virt_dma_desc, node);
 702                        pxad_launch_chan(chan, to_pxad_sw_desc(vd));
 703                }
 704        }
 705        spin_unlock_irqrestore(&chan->vc.lock, flags);
 706        wake_up(&chan->wq_state);
 707
 708        return IRQ_HANDLED;
 709}
 710
 711static irqreturn_t pxad_int_handler(int irq, void *dev_id)
 712{
 713        struct pxad_device *pdev = dev_id;
 714        struct pxad_phy *phy;
 715        u32 dint = readl(pdev->base + DINT);
 716        int i, ret = IRQ_NONE;
 717
 718        while (dint) {
 719                i = __ffs(dint);
 720                dint &= (dint - 1);
 721                phy = &pdev->phys[i];
 722                if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
 723                        ret = IRQ_HANDLED;
 724        }
 725
 726        return ret;
 727}
 728
 729static int pxad_alloc_chan_resources(struct dma_chan *dchan)
 730{
 731        struct pxad_chan *chan = to_pxad_chan(dchan);
 732        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
 733
 734        if (chan->desc_pool)
 735                return 1;
 736
 737        chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
 738                                          pdev->slave.dev,
 739                                          sizeof(struct pxad_desc_hw),
 740                                          __alignof__(struct pxad_desc_hw),
 741                                          0);
 742        if (!chan->desc_pool) {
 743                dev_err(&chan->vc.chan.dev->device,
 744                        "%s(): unable to allocate descriptor pool\n",
 745                        __func__);
 746                return -ENOMEM;
 747        }
 748
 749        return 1;
 750}
 751
 752static void pxad_free_chan_resources(struct dma_chan *dchan)
 753{
 754        struct pxad_chan *chan = to_pxad_chan(dchan);
 755
 756        vchan_free_chan_resources(&chan->vc);
 757        dma_pool_destroy(chan->desc_pool);
 758        chan->desc_pool = NULL;
 759
 760}
 761
 762static void pxad_free_desc(struct virt_dma_desc *vd)
 763{
 764        int i;
 765        dma_addr_t dma;
 766        struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
 767
 768        BUG_ON(sw_desc->nb_desc == 0);
 769        for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
 770                if (i > 0)
 771                        dma = sw_desc->hw_desc[i - 1]->ddadr;
 772                else
 773                        dma = sw_desc->first;
 774                dma_pool_free(sw_desc->desc_pool,
 775                              sw_desc->hw_desc[i], dma);
 776        }
 777        sw_desc->nb_desc = 0;
 778        kfree(sw_desc);
 779}
 780
 781static struct pxad_desc_sw *
 782pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
 783{
 784        struct pxad_desc_sw *sw_desc;
 785        dma_addr_t dma;
 786        int i;
 787
 788        sw_desc = kzalloc(sizeof(*sw_desc) +
 789                          nb_hw_desc * sizeof(struct pxad_desc_hw *),
 790                          GFP_NOWAIT);
 791        if (!sw_desc)
 792                return NULL;
 793        sw_desc->desc_pool = chan->desc_pool;
 794
 795        for (i = 0; i < nb_hw_desc; i++) {
 796                sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
 797                                                     GFP_NOWAIT, &dma);
 798                if (!sw_desc->hw_desc[i]) {
 799                        dev_err(&chan->vc.chan.dev->device,
 800                                "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
 801                                __func__, i, sw_desc->desc_pool);
 802                        goto err;
 803                }
 804
 805                if (i == 0)
 806                        sw_desc->first = dma;
 807                else
 808                        sw_desc->hw_desc[i - 1]->ddadr = dma;
 809                sw_desc->nb_desc++;
 810        }
 811
 812        return sw_desc;
 813err:
 814        pxad_free_desc(&sw_desc->vd);
 815        return NULL;
 816}
 817
 818static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
 819{
 820        struct virt_dma_chan *vc = to_virt_chan(tx->chan);
 821        struct pxad_chan *chan = to_pxad_chan(&vc->chan);
 822        struct virt_dma_desc *vd_chained = NULL,
 823                *vd = container_of(tx, struct virt_dma_desc, tx);
 824        dma_cookie_t cookie;
 825        unsigned long flags;
 826
 827        set_updater_desc(to_pxad_sw_desc(vd), tx->flags);
 828
 829        spin_lock_irqsave(&vc->lock, flags);
 830        cookie = dma_cookie_assign(tx);
 831
 832        if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
 833                list_move_tail(&vd->node, &vc->desc_issued);
 834                dev_dbg(&chan->vc.chan.dev->device,
 835                        "%s(): txd %p[%x]: submitted (hot linked)\n",
 836                        __func__, vd, cookie);
 837                goto out;
 838        }
 839
 840        /*
 841         * Fallback to placing the tx in the submitted queue
 842         */
 843        if (!list_empty(&vc->desc_submitted)) {
 844                vd_chained = list_entry(vc->desc_submitted.prev,
 845                                        struct virt_dma_desc, node);
 846                /*
 847                 * Only chain the descriptors if no new misalignment is
 848                 * introduced. If a new misalignment is chained, let the channel
 849                 * stop, and be relaunched in misalign mode from the irq
 850                 * handler.
 851                 */
 852                if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
 853                        pxad_desc_chain(vd_chained, vd);
 854                else
 855                        vd_chained = NULL;
 856        }
 857        dev_dbg(&chan->vc.chan.dev->device,
 858                "%s(): txd %p[%x]: submitted (%s linked)\n",
 859                __func__, vd, cookie, vd_chained ? "cold" : "not");
 860        list_move_tail(&vd->node, &vc->desc_submitted);
 861        chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;
 862
 863out:
 864        spin_unlock_irqrestore(&vc->lock, flags);
 865        return cookie;
 866}
 867
 868static void pxad_issue_pending(struct dma_chan *dchan)
 869{
 870        struct pxad_chan *chan = to_pxad_chan(dchan);
 871        struct virt_dma_desc *vd_first;
 872        unsigned long flags;
 873
 874        spin_lock_irqsave(&chan->vc.lock, flags);
 875        if (list_empty(&chan->vc.desc_submitted))
 876                goto out;
 877
 878        vd_first = list_first_entry(&chan->vc.desc_submitted,
 879                                    struct virt_dma_desc, node);
 880        dev_dbg(&chan->vc.chan.dev->device,
  881                "%s(): txd %p[%x]\n", __func__, vd_first, vd_first->tx.cookie);
 882
 883        vchan_issue_pending(&chan->vc);
 884        if (!pxad_try_hotchain(&chan->vc, vd_first))
 885                pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
 886out:
 887        spin_unlock_irqrestore(&chan->vc.lock, flags);
 888}
 889
 890static inline struct dma_async_tx_descriptor *
 891pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
 892                 unsigned long tx_flags)
 893{
 894        struct dma_async_tx_descriptor *tx;
 895        struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 896
 897        INIT_LIST_HEAD(&vd->node);
 898        tx = vchan_tx_prep(vc, vd, tx_flags);
 899        tx->tx_submit = pxad_tx_submit;
 900        dev_dbg(&chan->vc.chan.dev->device,
 901                "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
 902                vc, vd, vd->tx.cookie,
 903                tx_flags);
 904
 905        return tx;
 906}
 907
 908static void pxad_get_config(struct pxad_chan *chan,
 909                            enum dma_transfer_direction dir,
 910                            u32 *dcmd, u32 *dev_src, u32 *dev_dst)
 911{
 912        u32 maxburst = 0, dev_addr = 0;
 913        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 914        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
 915
 916        *dcmd = 0;
 917        if (dir == DMA_DEV_TO_MEM) {
 918                maxburst = chan->cfg.src_maxburst;
 919                width = chan->cfg.src_addr_width;
 920                dev_addr = chan->cfg.src_addr;
 921                *dev_src = dev_addr;
 922                *dcmd |= PXA_DCMD_INCTRGADDR;
 923                if (chan->drcmr <= pdev->nr_requestors)
 924                        *dcmd |= PXA_DCMD_FLOWSRC;
 925        }
 926        if (dir == DMA_MEM_TO_DEV) {
 927                maxburst = chan->cfg.dst_maxburst;
 928                width = chan->cfg.dst_addr_width;
 929                dev_addr = chan->cfg.dst_addr;
 930                *dev_dst = dev_addr;
 931                *dcmd |= PXA_DCMD_INCSRCADDR;
 932                if (chan->drcmr <= pdev->nr_requestors)
 933                        *dcmd |= PXA_DCMD_FLOWTRG;
 934        }
 935        if (dir == DMA_MEM_TO_MEM)
 936                *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
 937                        PXA_DCMD_INCSRCADDR;
 938
 939        dev_dbg(&chan->vc.chan.dev->device,
 940                "%s(): dev_addr=0x%x maxburst=%d width=%d  dir=%d\n",
 941                __func__, dev_addr, maxburst, width, dir);
 942
 943        if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
 944                *dcmd |= PXA_DCMD_WIDTH1;
 945        else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
 946                *dcmd |= PXA_DCMD_WIDTH2;
 947        else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
 948                *dcmd |= PXA_DCMD_WIDTH4;
 949
 950        if (maxburst == 8)
 951                *dcmd |= PXA_DCMD_BURST8;
 952        else if (maxburst == 16)
 953                *dcmd |= PXA_DCMD_BURST16;
 954        else if (maxburst == 32)
 955                *dcmd |= PXA_DCMD_BURST32;
 956
 957        /* FIXME: drivers should be ported over to use the filter
 958         * function. Once that's done, the following two lines can
 959         * be removed.
 960         */
 961        if (chan->cfg.slave_id)
 962                chan->drcmr = chan->cfg.slave_id;
 963}
 964
 965static struct dma_async_tx_descriptor *
 966pxad_prep_memcpy(struct dma_chan *dchan,
 967                 dma_addr_t dma_dst, dma_addr_t dma_src,
 968                 size_t len, unsigned long flags)
 969{
 970        struct pxad_chan *chan = to_pxad_chan(dchan);
 971        struct pxad_desc_sw *sw_desc;
 972        struct pxad_desc_hw *hw_desc;
 973        u32 dcmd;
 974        unsigned int i, nb_desc = 0;
 975        size_t copy;
 976
 977        if (!dchan || !len)
 978                return NULL;
 979
 980        dev_dbg(&chan->vc.chan.dev->device,
 981                "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
 982                __func__, (unsigned long)dma_dst, (unsigned long)dma_src,
 983                len, flags);
 984        pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);
 985
 986        nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
 987        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
 988        if (!sw_desc)
 989                return NULL;
 990        sw_desc->len = len;
 991
 992        if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
 993            !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
 994                sw_desc->misaligned = true;
 995
 996        i = 0;
 997        do {
 998                hw_desc = sw_desc->hw_desc[i++];
 999                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
1000                hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
1001                hw_desc->dsadr = dma_src;
1002                hw_desc->dtadr = dma_dst;
1003                len -= copy;
1004                dma_src += copy;
1005                dma_dst += copy;
1006        } while (len);
1007        set_updater_desc(sw_desc, flags);
1008
1009        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1010}
1011
1012static struct dma_async_tx_descriptor *
1013pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
1014                   unsigned int sg_len, enum dma_transfer_direction dir,
1015                   unsigned long flags, void *context)
1016{
1017        struct pxad_chan *chan = to_pxad_chan(dchan);
1018        struct pxad_desc_sw *sw_desc;
1019        size_t len, avail;
1020        struct scatterlist *sg;
1021        dma_addr_t dma;
1022        u32 dcmd, dsadr = 0, dtadr = 0;
1023        unsigned int nb_desc = 0, i, j = 0;
1024
1025        if ((sgl == NULL) || (sg_len == 0))
1026                return NULL;
1027
1028        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1029        dev_dbg(&chan->vc.chan.dev->device,
1030                "%s(): dir=%d flags=%lx\n", __func__, dir, flags);
1031
1032        for_each_sg(sgl, sg, sg_len, i)
1033                nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
1034        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
1035        if (!sw_desc)
1036                return NULL;
1037
1038        for_each_sg(sgl, sg, sg_len, i) {
1039                dma = sg_dma_address(sg);
1040                avail = sg_dma_len(sg);
1041                sw_desc->len += avail;
1042
1043                do {
1044                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
1045                        if (dma & 0x7)
1046                                sw_desc->misaligned = true;
1047
1048                        sw_desc->hw_desc[j]->dcmd =
1049                                dcmd | (PXA_DCMD_LENGTH & len);
1050                        sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
1051                        sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;
1052
1053                        dma += len;
1054                        avail -= len;
1055                } while (avail);
1056        }
1057        set_updater_desc(sw_desc, flags);
1058
1059        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1060}
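/*
 * Client-side sketch: a peripheral driver would normally reach this prep
 * callback through the generic dmaengine wrappers rather than calling it
 * directly. A minimal sequence, where chan, sgl, nents, txd and
 * fifo_dma_addr are hypothetical client-side variables, could look like:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_dma_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */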
1061
1062static struct dma_async_tx_descriptor *
1063pxad_prep_dma_cyclic(struct dma_chan *dchan,
1064                     dma_addr_t buf_addr, size_t len, size_t period_len,
1065                     enum dma_transfer_direction dir, unsigned long flags)
1066{
1067        struct pxad_chan *chan = to_pxad_chan(dchan);
1068        struct pxad_desc_sw *sw_desc;
1069        struct pxad_desc_hw **phw_desc;
1070        dma_addr_t dma;
1071        u32 dcmd, dsadr = 0, dtadr = 0;
1072        unsigned int nb_desc = 0;
1073
1074        if (!dchan || !len || !period_len)
1075                return NULL;
1076        if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
1077                dev_err(&chan->vc.chan.dev->device,
1078                        "Unsupported direction for cyclic DMA\n");
1079                return NULL;
1080        }
1081        /* the buffer length must be a multiple of period_len */
1082        if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
1083            !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
1084                return NULL;
1085
1086        pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1087        dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1088        dev_dbg(&chan->vc.chan.dev->device,
1089                "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1090                __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
1091
1092        nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
1093        nb_desc *= DIV_ROUND_UP(len, period_len);
1094        sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
1095        if (!sw_desc)
1096                return NULL;
1097        sw_desc->cyclic = true;
1098        sw_desc->len = len;
1099
1100        phw_desc = sw_desc->hw_desc;
1101        dma = buf_addr;
1102        do {
1103                phw_desc[0]->dsadr = dsadr ? dsadr : dma;
1104                phw_desc[0]->dtadr = dtadr ? dtadr : dma;
1105                phw_desc[0]->dcmd = dcmd;
1106                phw_desc++;
1107                dma += period_len;
1108                len -= period_len;
1109        } while (len);
1110        set_updater_desc(sw_desc, flags);
1111
1112        return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
1113}
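/*
 * Cyclic usage sketch: an audio-style client would typically request the
 * per-period interrupt through the generic helper, e.g. (buf_dma, buf_len
 * and period_len being hypothetical client values):
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * subject to the checks above: buf_len a multiple of period_len, and
 * period_len 8-byte aligned and no larger than PDMA_MAX_DESC_BYTES.
 */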
1114
1115static int pxad_config(struct dma_chan *dchan,
1116                       struct dma_slave_config *cfg)
1117{
1118        struct pxad_chan *chan = to_pxad_chan(dchan);
1119
1120        if (!dchan)
1121                return -EINVAL;
1122
1123        chan->cfg = *cfg;
1124        return 0;
1125}
1126
1127static int pxad_terminate_all(struct dma_chan *dchan)
1128{
1129        struct pxad_chan *chan = to_pxad_chan(dchan);
1130        struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
1131        struct virt_dma_desc *vd = NULL;
1132        unsigned long flags;
1133        struct pxad_phy *phy;
1134        LIST_HEAD(head);
1135
1136        dev_dbg(&chan->vc.chan.dev->device,
1137                "%s(): vchan %p: terminate all\n", __func__, &chan->vc);
1138
1139        spin_lock_irqsave(&chan->vc.lock, flags);
1140        vchan_get_all_descriptors(&chan->vc, &head);
1141
1142        list_for_each_entry(vd, &head, node) {
1143                dev_dbg(&chan->vc.chan.dev->device,
 1144                        "%s(): cancelling txd %p[%x] (completed=%d)\n", __func__,
1145                        vd, vd->tx.cookie, is_desc_completed(vd));
1146        }
1147
1148        phy = chan->phy;
1149        if (phy) {
1150                phy_disable(chan->phy);
1151                pxad_free_phy(chan);
1152                chan->phy = NULL;
1153                spin_lock(&pdev->phy_lock);
1154                phy->vchan = NULL;
1155                spin_unlock(&pdev->phy_lock);
1156        }
1157        spin_unlock_irqrestore(&chan->vc.lock, flags);
1158        vchan_dma_desc_free_list(&chan->vc, &head);
1159
1160        return 0;
1161}
1162
1163static unsigned int pxad_residue(struct pxad_chan *chan,
1164                                 dma_cookie_t cookie)
1165{
1166        struct virt_dma_desc *vd = NULL;
1167        struct pxad_desc_sw *sw_desc = NULL;
1168        struct pxad_desc_hw *hw_desc = NULL;
1169        u32 curr, start, len, end, residue = 0;
1170        unsigned long flags;
1171        bool passed = false;
1172        int i;
1173
1174        /*
1175         * If the channel does not have a phy pointer anymore, it has already
1176         * been completed. Therefore, its residue is 0.
1177         */
1178        if (!chan->phy)
1179                return 0;
1180
1181        spin_lock_irqsave(&chan->vc.lock, flags);
1182
1183        vd = vchan_find_desc(&chan->vc, cookie);
1184        if (!vd)
1185                goto out;
1186
1187        sw_desc = to_pxad_sw_desc(vd);
1188        if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1189                curr = phy_readl_relaxed(chan->phy, DSADR);
1190        else
1191                curr = phy_readl_relaxed(chan->phy, DTADR);
1192
1193        /*
1194         * curr has to be actually read before checking descriptor
1195         * completion, so that a curr inside a status updater
 1196         * descriptor implies the following test returns true, and to
 1197         * prevent reordering of the curr load and the test.
1198         */
1199        rmb();
1200        if (is_desc_completed(vd))
1201                goto out;
1202
1203        for (i = 0; i < sw_desc->nb_desc - 1; i++) {
1204                hw_desc = sw_desc->hw_desc[i];
1205                if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
1206                        start = hw_desc->dsadr;
1207                else
1208                        start = hw_desc->dtadr;
1209                len = hw_desc->dcmd & PXA_DCMD_LENGTH;
1210                end = start + len;
1211
1212                /*
 1213         * 'passed' will be latched once we find the descriptor
1214                 * which lies inside the boundaries of the curr
1215                 * pointer. All descriptors that occur in the list
1216                 * _after_ we found that partially handled descriptor
1217                 * are still to be processed and are hence added to the
1218                 * residual bytes counter.
1219                 */
1220
1221                if (passed) {
1222                        residue += len;
1223                } else if (curr >= start && curr <= end) {
1224                        residue += end - curr;
1225                        passed = true;
1226                }
1227        }
1228        if (!passed)
1229                residue = sw_desc->len;
1230
1231out:
1232        spin_unlock_irqrestore(&chan->vc.lock, flags);
1233        dev_dbg(&chan->vc.chan.dev->device,
1234                "%s(): txd %p[%x] sw_desc=%p: %d\n",
1235                __func__, vd, cookie, sw_desc, residue);
1236        return residue;
1237}
1238
1239static enum dma_status pxad_tx_status(struct dma_chan *dchan,
1240                                      dma_cookie_t cookie,
1241                                      struct dma_tx_state *txstate)
1242{
1243        struct pxad_chan *chan = to_pxad_chan(dchan);
1244        enum dma_status ret;
1245
1246        if (cookie == chan->bus_error)
1247                return DMA_ERROR;
1248
1249        ret = dma_cookie_status(dchan, cookie, txstate);
1250        if (likely(txstate && (ret != DMA_ERROR)))
1251                dma_set_residue(txstate, pxad_residue(chan, cookie));
1252
1253        return ret;
1254}
1255
1256static void pxad_synchronize(struct dma_chan *dchan)
1257{
1258        struct pxad_chan *chan = to_pxad_chan(dchan);
1259
1260        wait_event(chan->wq_state, !is_chan_running(chan));
1261        vchan_synchronize(&chan->vc);
1262}
1263
1264static void pxad_free_channels(struct dma_device *dmadev)
1265{
1266        struct pxad_chan *c, *cn;
1267
1268        list_for_each_entry_safe(c, cn, &dmadev->channels,
1269                                 vc.chan.device_node) {
1270                list_del(&c->vc.chan.device_node);
1271                tasklet_kill(&c->vc.task);
1272        }
1273}
1274
1275static int pxad_remove(struct platform_device *op)
1276{
1277        struct pxad_device *pdev = platform_get_drvdata(op);
1278
1279        pxad_cleanup_debugfs(pdev);
1280        pxad_free_channels(&pdev->slave);
1281        dma_async_device_unregister(&pdev->slave);
1282        return 0;
1283}
1284
1285static int pxad_init_phys(struct platform_device *op,
1286                          struct pxad_device *pdev,
1287                          unsigned int nb_phy_chans)
1288{
 1289        int irq0, irq, nr_irq = 0, i, ret = 0;
1290        struct pxad_phy *phy;
1291
1292        irq0 = platform_get_irq(op, 0);
1293        if (irq0 < 0)
1294                return irq0;
1295
1296        pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
1297                                  sizeof(pdev->phys[0]), GFP_KERNEL);
1298        if (!pdev->phys)
1299                return -ENOMEM;
1300
1301        for (i = 0; i < nb_phy_chans; i++)
1302                if (platform_get_irq(op, i) > 0)
1303                        nr_irq++;
1304
1305        for (i = 0; i < nb_phy_chans; i++) {
1306                phy = &pdev->phys[i];
1307                phy->base = pdev->base;
1308                phy->idx = i;
1309                irq = platform_get_irq(op, i);
1310                if ((nr_irq > 1) && (irq > 0))
1311                        ret = devm_request_irq(&op->dev, irq,
1312                                               pxad_chan_handler,
1313                                               IRQF_SHARED, "pxa-dma", phy);
1314                if ((nr_irq == 1) && (i == 0))
1315                        ret = devm_request_irq(&op->dev, irq0,
1316                                               pxad_int_handler,
1317                                               IRQF_SHARED, "pxa-dma", pdev);
1318                if (ret) {
1319                        dev_err(pdev->slave.dev,
1320                                "%s(): can't request irq %d:%d\n", __func__,
1321                                irq, ret);
1322                        return ret;
1323                }
1324        }
1325
1326        return 0;
1327}
1328
1329static const struct of_device_id pxad_dt_ids[] = {
1330        { .compatible = "marvell,pdma-1.0", },
1331        {}
1332};
1333MODULE_DEVICE_TABLE(of, pxad_dt_ids);
1334
1335static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
1336                                           struct of_dma *ofdma)
1337{
1338        struct pxad_device *d = ofdma->of_dma_data;
1339        struct dma_chan *chan;
1340
1341        chan = dma_get_any_slave_channel(&d->slave);
1342        if (!chan)
1343                return NULL;
1344
1345        to_pxad_chan(chan)->drcmr = dma_spec->args[0];
1346        to_pxad_chan(chan)->prio = dma_spec->args[1];
1347
1348        return chan;
1349}
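/*
 * Device-tree usage sketch, assuming the "marvell,pdma-1.0" binding with
 * #dma-cells = <2> (first cell: requestor line, second cell: priority).
 * Addresses, interrupt and requestor numbers below are examples only:
 *
 *	pdma: dma-controller@40000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0x40000000 0x10000>;
 *		interrupts = <25>;
 *		#dma-channels = <32>;
 *		#dma-requests = <75>;
 *		#dma-cells = <2>;
 *	};
 *
 *	mmc0: mmc@41100000 {
 *		...
 *		dmas = <&pdma 21 3>, <&pdma 22 3>;
 *		dma-names = "rx", "tx";
 *	};
 */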
1350
1351static int pxad_init_dmadev(struct platform_device *op,
1352                            struct pxad_device *pdev,
1353                            unsigned int nr_phy_chans,
1354                            unsigned int nr_requestors)
1355{
1356        int ret;
1357        unsigned int i;
1358        struct pxad_chan *c;
1359
1360        pdev->nr_chans = nr_phy_chans;
1361        pdev->nr_requestors = nr_requestors;
1362        INIT_LIST_HEAD(&pdev->slave.channels);
1363        pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
1364        pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
1365        pdev->slave.device_tx_status = pxad_tx_status;
1366        pdev->slave.device_issue_pending = pxad_issue_pending;
1367        pdev->slave.device_config = pxad_config;
1368        pdev->slave.device_synchronize = pxad_synchronize;
1369        pdev->slave.device_terminate_all = pxad_terminate_all;
1370
1371        if (op->dev.coherent_dma_mask)
1372                dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
1373        else
1374                dma_set_mask(&op->dev, DMA_BIT_MASK(32));
1375
1376        ret = pxad_init_phys(op, pdev, nr_phy_chans);
1377        if (ret)
1378                return ret;
1379
1380        for (i = 0; i < nr_phy_chans; i++) {
1381                c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
1382                if (!c)
1383                        return -ENOMEM;
1384                c->vc.desc_free = pxad_free_desc;
1385                vchan_init(&c->vc, &pdev->slave);
1386                init_waitqueue_head(&c->wq_state);
1387        }
1388
1389        return dma_async_device_register(&pdev->slave);
1390}
1391
1392static int pxad_probe(struct platform_device *op)
1393{
1394        struct pxad_device *pdev;
1395        const struct of_device_id *of_id;
1396        struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1397        struct resource *iores;
1398        int ret, dma_channels = 0, nb_requestors = 0;
1399        const enum dma_slave_buswidth widths =
1400                DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
1401                DMA_SLAVE_BUSWIDTH_4_BYTES;
1402
1403        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1404        if (!pdev)
1405                return -ENOMEM;
1406
1407        spin_lock_init(&pdev->phy_lock);
1408
1409        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
1410        pdev->base = devm_ioremap_resource(&op->dev, iores);
1411        if (IS_ERR(pdev->base))
1412                return PTR_ERR(pdev->base);
1413
1414        of_id = of_match_device(pxad_dt_ids, &op->dev);
1415        if (of_id) {
1416                of_property_read_u32(op->dev.of_node, "#dma-channels",
1417                                     &dma_channels);
1418                ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
1419                                           &nb_requestors);
1420                if (ret) {
1421                        dev_warn(pdev->slave.dev,
1422                                 "#dma-requests set to default 32 as missing in OF: %d",
1423                                 ret);
1424                        nb_requestors = 32;
 1425        }
1426        } else if (pdata && pdata->dma_channels) {
1427                dma_channels = pdata->dma_channels;
1428                nb_requestors = pdata->nb_requestors;
1429        } else {
1430                dma_channels = 32;      /* default 32 channel */
1431        }
1432
1433        dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
1434        dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
1435        dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
1436        dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
1437        pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
1438        pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
1439        pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
1440
1441        pdev->slave.copy_align = PDMA_ALIGNMENT;
1442        pdev->slave.src_addr_widths = widths;
1443        pdev->slave.dst_addr_widths = widths;
1444        pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1445        pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1446        pdev->slave.descriptor_reuse = true;
1447
1448        pdev->slave.dev = &op->dev;
1449        ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
1450        if (ret) {
1451                dev_err(pdev->slave.dev, "unable to register\n");
1452                return ret;
1453        }
1454
1455        if (op->dev.of_node) {
1456                /* Device-tree DMA controller registration */
1457                ret = of_dma_controller_register(op->dev.of_node,
1458                                                 pxad_dma_xlate, pdev);
1459                if (ret < 0) {
1460                        dev_err(pdev->slave.dev,
1461                                "of_dma_controller_register failed\n");
1462                        return ret;
1463                }
1464        }
1465
1466        platform_set_drvdata(op, pdev);
1467        pxad_init_debugfs(pdev);
1468        dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
1469                 dma_channels, nb_requestors);
1470        return 0;
1471}
1472
1473static const struct platform_device_id pxad_id_table[] = {
1474        { "pxa-dma", },
1475        { },
1476};
1477
1478static struct platform_driver pxad_driver = {
1479        .driver         = {
1480                .name   = "pxa-dma",
1481                .of_match_table = pxad_dt_ids,
1482        },
1483        .id_table       = pxad_id_table,
1484        .probe          = pxad_probe,
1485        .remove         = pxad_remove,
1486};
1487
1488bool pxad_filter_fn(struct dma_chan *chan, void *param)
1489{
1490        struct pxad_chan *c = to_pxad_chan(chan);
1491        struct pxad_param *p = param;
1492
1493        if (chan->device->dev->driver != &pxad_driver.driver)
1494                return false;
1495
1496        c->drcmr = p->drcmr;
1497        c->prio = p->prio;
1498
1499        return true;
1500}
1501EXPORT_SYMBOL_GPL(pxad_filter_fn);
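/*
 * Legacy (non device-tree) usage sketch: a client that cannot go through the
 * xlate callback can pass pxad_filter_fn and a struct pxad_param (from
 * <linux/dma/pxa-dma.h>) to the generic channel allocator. The requestor
 * line below is only an example value:
 *
 *	struct pxad_param param = {
 *		.prio	= PXAD_PRIO_LOWEST,
 *		.drcmr	= 24,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pxad_filter_fn, &param);
 */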
1502
1503module_platform_driver(pxad_driver);
1504
1505MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
1506MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
1507MODULE_LICENSE("GPL v2");
1508