linux/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
// SPDX-License-Identifier:  GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus widths up to 512 bits (for both AXI master interfaces), but
 * the actual width depends on the IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS                 \
        (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
        DMA_SLAVE_BUSWIDTH_2_BYTES      | \
        DMA_SLAVE_BUSWIDTH_4_BYTES      | \
        DMA_SLAVE_BUSWIDTH_8_BYTES      | \
        DMA_SLAVE_BUSWIDTH_16_BYTES     | \
        DMA_SLAVE_BUSWIDTH_32_BYTES     | \
        DMA_SLAVE_BUSWIDTH_64_BYTES)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
        iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
        return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
        iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
        return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
        /*
         * We split one 64-bit write into two 32-bit writes because some HW
         * doesn't support 64-bit access.
         */
        iowrite32(lower_32_bits(val), chan->chan_regs + reg);
        iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
        u32 val;

        if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
        } else {
                val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
                val &= ~irq_mask;
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
        }
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
        return axi_chan_ioread32(chan, CH_INTSTATUS);
}

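/*
 * The channel enable and suspend bits in DMAC_CHEN are paired with
 * per-channel write-enable (WE) bits: the controller only latches an
 * enable/suspend bit whose WE bit is set in the same register write. That
 * is why the helpers below always set BIT(chan->id) << *_WE_SHIFT along
 * with the bit they actually modify.
 */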
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
        val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
               BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

        return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
        u32 i;

        for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
                axi_chan_disable(&chip->dw->chan[i]);
        }
}

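/*
 * The transfer width used below is derived from the common alignment of the
 * source address, destination address and length: __ffs() of their OR gives
 * the largest power-of-two unit that divides all of them, capped by
 * BIT(max_width). Worked example (illustrative values only): src = 0x1008,
 * dst = 0x2010 and len = 0x40 give src | dst | len = 0x3058, so __ffs()
 * returns 3, i.e. 8-byte wide transfers.
 */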
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
                                   dma_addr_t dst, size_t len)
{
        u32 max_width = chan->chip->dw->hdata->m_data_width;

        return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
        return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
{
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *desc;
        dma_addr_t phys;

        desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
        if (unlikely(!desc)) {
                dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
                        axi_chan_name(chan));
                return NULL;
        }

        atomic_inc(&chan->descs_allocated);
        INIT_LIST_HEAD(&desc->xfer_list);
        desc->vd.tx.phys = phys;
        desc->chan = chan;

        return desc;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
        struct axi_dma_chan *chan = desc->chan;
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *child, *_next;
        unsigned int descs_put = 0;

        list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
                list_del(&child->xfer_list);
                dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
                descs_put++;
        }

        dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
        descs_put++;

        atomic_sub(descs_put, &chan->descs_allocated);
        dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
                axi_chan_name(chan), descs_put,
                atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
        axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        enum dma_status ret;

        ret = dma_cookie_status(dchan, cookie, txstate);

        if (chan->is_paused && ret == DMA_IN_PROGRESS)
                ret = DMA_PAUSED;

        return ret;
}

static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
        axi_chan_iowrite64(chan, CH_LLP, adr);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
                                      struct axi_dma_desc *first)
{
        u32 priority = chan->chip->dw->hdata->priority[chan->id];
        u32 reg, irq_mask;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));

                return;
        }

        axi_dma_enable(chan->chip);

        reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
               DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
        axi_chan_iowrite32(chan, CH_CFG_L, reg);

        reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
               priority << CH_CFG_H_PRIORITY_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
        axi_chan_iowrite32(chan, CH_CFG_H, reg);

        write_chan_llp(chan, first->vd.tx.phys | lms);

        irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
        axi_chan_irq_sig_set(chan, irq_mask);

        /* Generate 'suspend' status but don't generate an interrupt */
        irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
        axi_chan_irq_set(chan, irq_mask);

        axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
        struct axi_dma_desc *desc;
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&chan->vc);
        if (!vd)
                return;

        desc = vd_to_axi_desc(vd);
        dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
                vd->tx.cookie);
        axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (vchan_issue_pending(&chan->vc))
                axi_chan_start_first_queued(chan);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan)) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));
                return -EBUSY;
        }

        dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

        pm_runtime_get(chan->chip->dev);

        return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan))
                dev_err(dchan2dev(dchan), "%s is non-idle!\n",
                        axi_chan_name(chan));

        axi_chan_disable(chan);
        axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

        vchan_free_chan_resources(&chan->vc);

        dev_vdbg(dchan2dev(dchan),
                 "%s: free resources, descriptors still allocated: %u\n",
                 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

        pm_runtime_put(chan->chip->dev);
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
{
        u32 val;

        val = le32_to_cpu(desc->lli.ctl_hi);
        val |= CH_CTL_H_LLI_LAST;
        desc->lli.ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI0 for the source master */
        val = le32_to_cpu(desc->lli.ctl_lo);
        val &= ~CH_CTL_L_SRC_MAST;
        desc->lli.ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI1 for the destination master if available */
        val = le32_to_cpu(desc->lli.ctl_lo);
        if (desc->chan->chip->dw->hdata->nr_masters > 1)
                val |= CH_CTL_L_DST_MAST;
        else
                val &= ~CH_CTL_L_DST_MAST;

        desc->lli.ctl_lo = cpu_to_le32(val);
}

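/*
 * A dmaengine client would exercise the memcpy path below roughly as in the
 * following sketch (illustrative only, error handling trimmed; src_dma,
 * dst_dma and len are placeholders, not names used by this driver):
 *
 *      dma_cap_mask_t mask;
 *      struct dma_chan *chan;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_chan_by_mask(&mask);
 *      tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *                                     DMA_PREP_INTERRUPT);
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 *      ... wait for completion, then dma_release_channel(chan) ...
 */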
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
                         dma_addr_t src_adr, size_t len, unsigned long flags)
{
        struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        size_t block_ts, max_block_ts, xfer_len;
        u32 xfer_width, reg;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
                axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

        max_block_ts = chan->chip->dw->hdata->block_size[chan->id];

        while (len) {
                xfer_len = len;

                /*
                 * Take care of the alignment.
                 * The source and destination widths can actually differ, but
                 * we make them the same to keep things simple.
                 */
                xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

                /*
                 * block_ts indicates the total number of data units of
                 * xfer_width to be transferred in one DMA block transfer.
                 * The BLOCK_TS register should be set to block_ts - 1.
                 */
                block_ts = xfer_len >> xfer_width;
                if (block_ts > max_block_ts) {
                        block_ts = max_block_ts;
                        xfer_len = max_block_ts << xfer_width;
                }

                desc = axi_desc_get(chan);
                if (unlikely(!desc))
                        goto err_desc_get;

                write_desc_sar(desc, src_adr);
                write_desc_dar(desc, dst_adr);
                desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);

                reg = CH_CTL_H_LLI_VALID;
                if (chan->chip->dw->hdata->restrict_axi_burst_len) {
                        u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

                        reg |= (CH_CTL_H_ARLEN_EN |
                                burst_len << CH_CTL_H_ARLEN_POS |
                                CH_CTL_H_AWLEN_EN |
                                burst_len << CH_CTL_H_AWLEN_POS);
                }
                desc->lli.ctl_hi = cpu_to_le32(reg);

                reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
                       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
                       xfer_width << CH_CTL_L_DST_WIDTH_POS |
                       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
                desc->lli.ctl_lo = cpu_to_le32(reg);

                set_desc_src_master(desc);
                set_desc_dest_master(desc);

                /* Manage the transfer list (xfer_list) */
                if (!first) {
                        first = desc;
                } else {
                        list_add_tail(&desc->xfer_list, &first->xfer_list);
                        write_desc_llp(prev, desc->vd.tx.phys | lms);
                }
                prev = desc;

                /* Update the length and addresses for the next loop cycle */
                len -= xfer_len;
                dst_adr += xfer_len;
                src_adr += xfer_len;
        }

        /* Total len of src/dst == 0, so no descriptors were allocated */
        if (unlikely(!first))
                return NULL;

        /* Set end-of-link in the last link descriptor of the list */
        set_desc_last(desc);

        return vchan_tx_prep(&chan->vc, &first->vd, flags);

err_desc_get:
        if (first)
                axi_desc_put(first);
        return NULL;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
                              struct axi_dma_desc *desc)
{
        dev_err(dchan2dev(&chan->vc.chan),
                "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
                le64_to_cpu(desc->lli.sar),
                le64_to_cpu(desc->lli.dar),
                le64_to_cpu(desc->lli.llp),
                le32_to_cpu(desc->lli.block_ts_lo),
                le32_to_cpu(desc->lli.ctl_hi),
                le32_to_cpu(desc->lli.ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
                                   struct axi_dma_desc *desc_head)
{
        struct axi_dma_desc *desc;

        axi_chan_dump_lli(chan, desc_head);
        list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
                axi_chan_dump_lli(chan, desc);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        /* The bad descriptor currently is at the head of the vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the erroneous descriptor from the issued list */
        list_del(&vd->node);

        /* WARN about the bad descriptor */
        dev_err(chan2dev(chan),
                "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
                axi_chan_name(chan), vd->tx.cookie, status);
        axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

        vchan_cookie_complete(vd);

        /* Try to restart the controller */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
                        axi_chan_name(chan));
                axi_chan_disable(chan);
        }

        /* The completed descriptor currently is at the head of the vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the completed descriptor from the issued list before completing */
        list_del(&vd->node);
        vchan_cookie_complete(vd);

        /* Submit queued descriptors after processing the completed ones */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
        struct axi_dma_chip *chip = dev_id;
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan;

        u32 status, i;

        /* Disable DMAC interrupts. We'll enable them after processing the channels */
        axi_dma_irq_disable(chip);

        /* Poll, clear and process every channel's interrupt status */
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                chan = &dw->chan[i];
                status = axi_chan_irq_read(chan);
                axi_chan_irq_clear(chan, status);

                dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
                        axi_chan_name(chan), i, status);

                if (status & DWAXIDMAC_IRQ_ALL_ERR)
                        axi_chan_handle_err(chan, status);
                else if (status & DWAXIDMAC_IRQ_DMA_TRF)
                        axi_chan_block_xfer_complete(chan);
        }

        /* Re-enable interrupts */
        axi_dma_irq_enable(chip);

        return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        vchan_get_all_descriptors(&chan->vc, &head);

        /*
         * As vchan_dma_desc_free_list() can access the desc_allocated list,
         * we need to call it while holding vc.lock.
         */
        vchan_dma_desc_free_list(&chan->vc, &head);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

        return 0;
}

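/*
 * Pausing sets the per-channel SUSP bit (plus its WE bit) in DMAC_CHEN and
 * then polls CH_INTSTATUS for DWAXIDMAC_IRQ_SUSPENDED, up to 20 iterations
 * of 2 us (roughly 40 us in total) before giving up with -EAGAIN.
 */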
static int dma_chan_pause(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        unsigned int timeout = 20; /* timeout iterations */
        u32 val;

        spin_lock_irqsave(&chan->vc.lock, flags);

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
               BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        do {
                if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
                        break;

                udelay(2);
        } while (--timeout);

        axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

        chan->is_paused = true;

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
        val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        if (chan->is_paused)
                axi_chan_resume(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
        axi_dma_irq_disable(chip);
        axi_dma_disable(chip);

        clk_disable_unprepare(chip->core_clk);
        clk_disable_unprepare(chip->cfgr_clk);

        return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
        int ret;

        ret = clk_prepare_enable(chip->cfgr_clk);
        if (ret < 0)
                return ret;

        ret = clk_prepare_enable(chip->core_clk);
        if (ret < 0)
                return ret;

        axi_dma_enable(chip);
        axi_dma_irq_enable(chip);

        return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_resume(chip);
}

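/*
 * The properties parsed below follow the "snps,axi-dma-1.01a" DT binding.
 * A purely illustrative node (the values depend on the IP configuration)
 * could look like:
 *
 *      dmac: dma-controller@80000 {
 *              compatible = "snps,axi-dma-1.01a";
 *              reg = <0x80000 0x400>;
 *              clocks = <&core_clk>, <&cfgr_clk>;
 *              clock-names = "core-clk", "cfgr-clk";
 *              interrupts = <27>;
 *              dma-channels = <4>;
 *              snps,dma-masters = <2>;
 *              snps,data-width = <3>;
 *              snps,block-size = <4096 4096 4096 4096>;
 *              snps,priority = <0 1 2 3>;
 *              snps,axi-max-burst-len = <16>;
 *      };
 */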
static int parse_device_properties(struct axi_dma_chip *chip)
{
        struct device *dev = chip->dev;
        u32 tmp, carr[DMAC_MAX_CHANNELS];
        int ret;

        ret = device_property_read_u32(dev, "dma-channels", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
                return -EINVAL;

        chip->dw->hdata->nr_channels = tmp;

        ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
                return -EINVAL;

        chip->dw->hdata->nr_masters = tmp;

        ret = device_property_read_u32(dev, "snps,data-width", &tmp);
        if (ret)
                return ret;
        if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
                return -EINVAL;

        chip->dw->hdata->m_data_width = tmp;

        ret = device_property_read_u32_array(dev, "snps,block-size", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
                        return -EINVAL;

                chip->dw->hdata->block_size[tmp] = carr[tmp];
        }

        ret = device_property_read_u32_array(dev, "snps,priority", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        /* Priority values must be programmed within the [0:nr_channels-1] range */
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] >= chip->dw->hdata->nr_channels)
                        return -EINVAL;

                chip->dw->hdata->priority[tmp] = carr[tmp];
        }

        /* axi-max-burst-len is an optional property */
        ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
        if (!ret) {
                if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
                        return -EINVAL;
                if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
                        return -EINVAL;

                chip->dw->hdata->restrict_axi_burst_len = true;
                chip->dw->hdata->axi_rw_burst_len = tmp - 1;
        }

        return 0;
}

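/*
 * Probe sketch: map the registers, look up the clocks, parse the hardware
 * description properties, set up the virtual channels, bring the controller
 * up (Runtime PM plus axi_dma_resume()) and finally register the dmaengine
 * device.
 */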
static int dw_probe(struct platform_device *pdev)
{
        struct axi_dma_chip *chip;
        struct resource *mem;
        struct dw_axi_dma *dw;
        struct dw_axi_dma_hcfg *hdata;
        u32 i;
        int ret;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
        if (!hdata)
                return -ENOMEM;

        chip->dw = dw;
        chip->dev = &pdev->dev;
        chip->dw->hdata = hdata;

        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        chip->regs = devm_ioremap_resource(chip->dev, mem);
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);

        chip->core_clk = devm_clk_get(chip->dev, "core-clk");
        if (IS_ERR(chip->core_clk))
                return PTR_ERR(chip->core_clk);

        chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
        if (IS_ERR(chip->cfgr_clk))
                return PTR_ERR(chip->cfgr_clk);

        ret = parse_device_properties(chip);
        if (ret)
                return ret;

        dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
                                sizeof(*dw->chan), GFP_KERNEL);
        if (!dw->chan)
                return -ENOMEM;

        ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
                               IRQF_SHARED, KBUILD_MODNAME, chip);
        if (ret)
                return ret;

        /* LLI address must be aligned to a 64-byte boundary */
        dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
                                         sizeof(struct axi_dma_desc), 64, 0);
        if (!dw->desc_pool) {
                dev_err(chip->dev, "No memory for descriptors dma pool\n");
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < hdata->nr_channels; i++) {
                struct axi_dma_chan *chan = &dw->chan[i];

                chan->chip = chip;
                chan->id = i;
                chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
                atomic_set(&chan->descs_allocated, 0);

                chan->vc.desc_free = vchan_desc_put;
                vchan_init(&chan->vc, &dw->dma);
        }

        /* Set capabilities */
        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

        /* DMA capabilities */
        dw->dma.chancnt = hdata->nr_channels;
        dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.directions = BIT(DMA_MEM_TO_MEM);
        dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        dw->dma.dev = chip->dev;
        dw->dma.device_tx_status = dma_chan_tx_status;
        dw->dma.device_issue_pending = dma_chan_issue_pending;
        dw->dma.device_terminate_all = dma_chan_terminate_all;
        dw->dma.device_pause = dma_chan_pause;
        dw->dma.device_resume = dma_chan_resume;

        dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

        platform_set_drvdata(pdev, chip);

        pm_runtime_enable(chip->dev);

        /*
         * We can't just call pm_runtime_get() here instead of
         * pm_runtime_get_noresume() + axi_dma_resume() because we need the
         * driver to work even when Runtime PM is disabled.
         */
        pm_runtime_get_noresume(chip->dev);
        ret = axi_dma_resume(chip);
        if (ret < 0)
                goto err_pm_disable;

        axi_dma_hw_init(chip);

        pm_runtime_put(chip->dev);

        ret = dmaenginem_async_device_register(&dw->dma);
        if (ret)
                goto err_pm_disable;

        dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
                 dw->hdata->nr_channels);

        return 0;

err_pm_disable:
        pm_runtime_disable(chip->dev);

        return ret;
}

static int dw_remove(struct platform_device *pdev)
{
        struct axi_dma_chip *chip = platform_get_drvdata(pdev);
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan, *_chan;
        u32 i;

        /* Enable the clocks before accessing the registers */
        clk_prepare_enable(chip->cfgr_clk);
        clk_prepare_enable(chip->core_clk);
        axi_dma_irq_disable(chip);
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                axi_chan_disable(&chip->dw->chan[i]);
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
        }
        axi_dma_disable(chip);

        pm_runtime_disable(chip->dev);
        axi_dma_suspend(chip);

        devm_free_irq(chip->dev, chip->irq, chip);

        list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
                        vc.chan.device_node) {
                list_del(&chan->vc.chan.device_node);
                tasklet_kill(&chan->vc.task);
        }

        return 0;
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
        { .compatible = "snps,axi-dma-1.01a" },
        {}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
        .probe          = dw_probe,
        .remove         = dw_remove,
        .driver = {
                .name   = KBUILD_MODNAME,
                .of_match_table = of_match_ptr(dw_dma_of_id_table),
                .pm = &dw_axi_dma_pm_ops,
        },
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");