linux/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
// SPDX-License-Identifier:  GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus widths up to 512 bits (for both AXI master interfaces), but
 * it depends on the IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS                 \
        (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
        DMA_SLAVE_BUSWIDTH_2_BYTES      | \
        DMA_SLAVE_BUSWIDTH_4_BYTES      | \
        DMA_SLAVE_BUSWIDTH_8_BYTES      | \
        DMA_SLAVE_BUSWIDTH_16_BYTES     | \
        DMA_SLAVE_BUSWIDTH_32_BYTES     | \
        DMA_SLAVE_BUSWIDTH_64_BYTES)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
        iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
        return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
        iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
        return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
        /*
         * We split one 64-bit write into two 32-bit writes, as some HW
         * doesn't support 64-bit access.
         */
        iowrite32(lower_32_bits(val), chan->chan_regs + reg);
        iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= DMAC_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val &= ~INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
        u32 val;

        val = axi_dma_ioread32(chip, DMAC_CFG);
        val |= INT_EN_MASK;
        axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
        u32 val;

        if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
        } else {
                val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
                val &= ~irq_mask;
                axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
        }
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
        axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
        return axi_chan_ioread32(chan, CH_INTSTATUS);
}

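/*
 * Channel enable/disable goes through the shared DMAC_CHEN register: each
 * channel has an enable bit (CH_EN) paired with a write-enable bit
 * (CH_EN_WE), so a read-modify-write can update one channel without
 * disturbing the others.
 */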
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
        val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
               BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

        return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
        u32 i;

        for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
                axi_chan_disable(&chip->dw->chan[i]);
        }
}

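/*
 * Pick the widest transfer width (as a power-of-two exponent) that keeps the
 * source address, destination address and length aligned, capped at the
 * master data bus width. E.g. src/dst/len all multiples of 4 bytes yield
 * width 2 (32 bit), while any odd value forces byte-wide transfers.
 */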
static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
                                   dma_addr_t dst, size_t len)
{
        u32 max_width = chan->chip->dw->hdata->m_data_width;

        return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
        return dma_chan_name(&chan->vc.chan);
}

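/*
 * Hardware descriptors (LLIs) are carved out of the dmam_pool created in
 * dw_probe(); descs_allocated only tracks how many are outstanding per
 * channel for debug output.
 */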
static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
{
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *desc;
        dma_addr_t phys;

        desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
        if (unlikely(!desc)) {
                dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
                        axi_chan_name(chan));
                return NULL;
        }

        atomic_inc(&chan->descs_allocated);
        INIT_LIST_HEAD(&desc->xfer_list);
        desc->vd.tx.phys = phys;
        desc->chan = chan;

        return desc;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
        struct axi_dma_chan *chan = desc->chan;
        struct dw_axi_dma *dw = chan->chip->dw;
        struct axi_dma_desc *child, *_next;
        unsigned int descs_put = 0;

        list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
                list_del(&child->xfer_list);
                dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
                descs_put++;
        }

        dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
        descs_put++;

        atomic_sub(descs_put, &chan->descs_allocated);
        dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
                axi_chan_name(chan), descs_put,
                atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
        axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        enum dma_status ret;

        ret = dma_cookie_status(dchan, cookie, txstate);

        if (chan->is_paused && ret == DMA_IN_PROGRESS)
                ret = DMA_PAUSED;

        return ret;
}

static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
        axi_chan_iowrite64(chan, CH_LLP, adr);
}

/* Called in chan locked context */
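/*
 * Programming sequence: enable the DMAC, select linked-list multi-block
 * transfers for both source and destination in CH_CFG_L, program transfer
 * type/priority/handshaking in CH_CFG_H, point CH_LLP at the first LLI,
 * unmask transfer-done and error interrupts, then enable the channel.
 */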
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
                                      struct axi_dma_desc *first)
{
        u32 priority = chan->chip->dw->hdata->priority[chan->id];
        u32 reg, irq_mask;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));

                return;
        }

        axi_dma_enable(chan->chip);

        reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
               DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
        axi_chan_iowrite32(chan, CH_CFG_L, reg);

        reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
               priority << CH_CFG_H_PRIORITY_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
               DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
        axi_chan_iowrite32(chan, CH_CFG_H, reg);

        write_chan_llp(chan, first->vd.tx.phys | lms);

        irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
        axi_chan_irq_sig_set(chan, irq_mask);

        /* Generate 'suspend' status but don't generate interrupt */
        irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
        axi_chan_irq_set(chan, irq_mask);

        axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
        struct axi_dma_desc *desc;
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&chan->vc);
        if (!vd)
                return;

        desc = vd_to_axi_desc(vd);
        dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
                vd->tx.cookie);
        axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (vchan_issue_pending(&chan->vc))
                axi_chan_start_first_queued(chan);
        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan)) {
                dev_err(chan2dev(chan), "%s is non-idle!\n",
                        axi_chan_name(chan));
                return -EBUSY;
        }

        dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

        pm_runtime_get(chan->chip->dev);

        return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

        /* ASSERT: channel is idle */
        if (axi_chan_is_hw_enable(chan))
                dev_err(dchan2dev(dchan), "%s is non-idle!\n",
                        axi_chan_name(chan));

        axi_chan_disable(chan);
        axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

        vchan_free_chan_resources(&chan->vc);

        dev_vdbg(dchan2dev(dchan),
                 "%s: free resources, descriptors still allocated: %u\n",
                 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

        pm_runtime_put(chan->chip->dev);
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI set to 1, it understands that the current block is the final block of
 * the transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
{
        u32 val;

        val = le32_to_cpu(desc->lli.ctl_hi);
        val |= CH_CTL_H_LLI_LAST;
        desc->lli.ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
{
        desc->lli.dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI0 for source master */
        val = le32_to_cpu(desc->lli.ctl_lo);
        val &= ~CH_CTL_L_SRC_MAST;
        desc->lli.ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_desc *desc)
{
        u32 val;

        /* Select AXI1 for destination master if available */
        val = le32_to_cpu(desc->lli.ctl_lo);
        if (desc->chan->chip->dw->hdata->nr_masters > 1)
                val |= CH_CTL_L_DST_MAST;
        else
                val &= ~CH_CTL_L_DST_MAST;

        desc->lli.ctl_lo = cpu_to_le32(val);
}

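/*
 * A memcpy request longer than the per-channel block limit is split into a
 * chain of LLIs: each LLI covers at most block_size data items of the chosen
 * transfer width, the LLIs are linked through their LLP fields, and the last
 * one is marked via set_desc_last().
 */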
static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
                         dma_addr_t src_adr, size_t len, unsigned long flags)
{
        struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        size_t block_ts, max_block_ts, xfer_len;
        u32 xfer_width, reg;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */

        dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
                axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

        max_block_ts = chan->chip->dw->hdata->block_size[chan->id];

        while (len) {
                xfer_len = len;

                /*
                 * Take care of the alignment.
                 * Source and destination widths can actually differ, but we
                 * keep them the same to stay simple.
                 */
                xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

                /*
                 * block_ts indicates the total number of data items of
                 * xfer_width to be transferred in a DMA block transfer.
                 * The BLOCK_TS register should be set to block_ts - 1.
                 */
                block_ts = xfer_len >> xfer_width;
                if (block_ts > max_block_ts) {
                        block_ts = max_block_ts;
                        xfer_len = max_block_ts << xfer_width;
                }

                desc = axi_desc_get(chan);
                if (unlikely(!desc))
                        goto err_desc_get;

                write_desc_sar(desc, src_adr);
                write_desc_dar(desc, dst_adr);
                desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);

                reg = CH_CTL_H_LLI_VALID;
                if (chan->chip->dw->hdata->restrict_axi_burst_len) {
                        u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

                        reg |= (CH_CTL_H_ARLEN_EN |
                                burst_len << CH_CTL_H_ARLEN_POS |
                                CH_CTL_H_AWLEN_EN |
                                burst_len << CH_CTL_H_AWLEN_POS);
                }
                desc->lli.ctl_hi = cpu_to_le32(reg);

                reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
                       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
                       xfer_width << CH_CTL_L_DST_WIDTH_POS |
                       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
                       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
                desc->lli.ctl_lo = cpu_to_le32(reg);

                set_desc_src_master(desc);
                set_desc_dest_master(desc);

                /* Manage transfer list (xfer_list) */
                if (!first) {
                        first = desc;
                } else {
                        list_add_tail(&desc->xfer_list, &first->xfer_list);
                        write_desc_llp(prev, desc->vd.tx.phys | lms);
                }
                prev = desc;

                /* Update the length and addresses for the next loop cycle */
                len -= xfer_len;
                dst_adr += xfer_len;
                src_adr += xfer_len;
        }

        /* Total len of src/dest sg == 0, so no descriptors were allocated */
        if (unlikely(!first))
                return NULL;

        /* Set end-of-link in the last link descriptor of the list */
        set_desc_last(desc);

        return vchan_tx_prep(&chan->vc, &first->vd, flags);

err_desc_get:
        axi_desc_put(first);
        return NULL;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
                              struct axi_dma_desc *desc)
{
        dev_err(dchan2dev(&chan->vc.chan),
                "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
                le64_to_cpu(desc->lli.sar),
                le64_to_cpu(desc->lli.dar),
                le64_to_cpu(desc->lli.llp),
                le32_to_cpu(desc->lli.block_ts_lo),
                le32_to_cpu(desc->lli.ctl_hi),
                le32_to_cpu(desc->lli.ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
                                   struct axi_dma_desc *desc_head)
{
        struct axi_dma_desc *desc;

        axi_chan_dump_lli(chan, desc_head);
        list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
                axi_chan_dump_lli(chan, desc);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        /* The bad descriptor is currently at the head of the vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the bad descriptor from the issued list */
        list_del(&vd->node);

        /* WARN about bad descriptor */
        dev_err(chan2dev(chan),
                "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
                axi_chan_name(chan), vd->tx.cookie, status);
        axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

        vchan_cookie_complete(vd);

        /* Try to restart the controller */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
        struct virt_dma_desc *vd;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        if (unlikely(axi_chan_is_hw_enable(chan))) {
                dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
                        axi_chan_name(chan));
                axi_chan_disable(chan);
        }

        /* The completed descriptor is currently at the head of the vc list */
        vd = vchan_next_desc(&chan->vc);
        /* Remove the completed descriptor from the issued list before completing */
        list_del(&vd->node);
        vchan_cookie_complete(vd);

        /* Submit queued descriptors after processing the completed ones */
        axi_chan_start_first_queued(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
        struct axi_dma_chip *chip = dev_id;
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan;

        u32 status, i;

        /* Disable DMAC interrupts. We'll enable them after processing the channels */
        axi_dma_irq_disable(chip);

        /* Poll, clear and process every channel interrupt status */
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                chan = &dw->chan[i];
                status = axi_chan_irq_read(chan);
                axi_chan_irq_clear(chan, status);

                dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
                        axi_chan_name(chan), i, status);

                if (status & DWAXIDMAC_IRQ_ALL_ERR)
                        axi_chan_handle_err(chan, status);
                else if (status & DWAXIDMAC_IRQ_DMA_TRF)
                        axi_chan_block_xfer_complete(chan);
        }

        /* Re-enable interrupts */
        axi_dma_irq_enable(chip);

        return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vc.lock, flags);

        axi_chan_disable(chan);

        vchan_get_all_descriptors(&chan->vc, &head);

        /*
         * As vchan_dma_desc_free_list can access the desc_allocated list,
         * we need to call it while holding vc.lock.
         */
        vchan_dma_desc_free_list(&chan->vc, &head);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

        return 0;
}

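/*
 * Pausing sets the channel's SUSP request (plus its write-enable bit) in
 * DMAC_CHEN, then polls CH_INTSTATUS for DWAXIDMAC_IRQ_SUSPENDED, giving the
 * hardware roughly 40 us (20 iterations x 2 us) to acknowledge the suspend
 * before reporting -EAGAIN.
 */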
static int dma_chan_pause(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;
        unsigned int timeout = 20; /* timeout iterations */
        u32 val;

        spin_lock_irqsave(&chan->vc.lock, flags);

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
               BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        do {
                if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
                        break;

                udelay(2);
        } while (--timeout);

        axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

        chan->is_paused = true;

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
        u32 val;

        val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
        val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
        val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
        axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

        chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
        struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);

        if (chan->is_paused)
                axi_chan_resume(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
        axi_dma_irq_disable(chip);
        axi_dma_disable(chip);

        clk_disable_unprepare(chip->core_clk);
        clk_disable_unprepare(chip->cfgr_clk);

        return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
        int ret;

        ret = clk_prepare_enable(chip->cfgr_clk);
        if (ret < 0)
                return ret;

        ret = clk_prepare_enable(chip->core_clk);
        if (ret < 0)
                return ret;

        axi_dma_enable(chip);
        axi_dma_irq_enable(chip);

        return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
        struct axi_dma_chip *chip = dev_get_drvdata(dev);

        return axi_dma_resume(chip);
}

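/*
 * The hardware description comes from firmware properties (DT). Partial,
 * illustrative node only: the property names match what this function reads,
 * but the values below are made-up examples.
 *
 *      dmac: dma-controller {
 *              compatible = "snps,axi-dma-1.01a";
 *              dma-channels = <4>;
 *              snps,dma-masters = <2>;
 *              snps,data-width = <3>;
 *              snps,block-size = <4096 4096 4096 4096>;
 *              snps,priority = <0 1 2 3>;
 *              snps,axi-max-burst-len = <16>;          (optional)
 *      };
 */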
static int parse_device_properties(struct axi_dma_chip *chip)
{
        struct device *dev = chip->dev;
        u32 tmp, carr[DMAC_MAX_CHANNELS];
        int ret;

        ret = device_property_read_u32(dev, "dma-channels", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
                return -EINVAL;

        chip->dw->hdata->nr_channels = tmp;

        ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
        if (ret)
                return ret;
        if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
                return -EINVAL;

        chip->dw->hdata->nr_masters = tmp;

        ret = device_property_read_u32(dev, "snps,data-width", &tmp);
        if (ret)
                return ret;
        if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
                return -EINVAL;

        chip->dw->hdata->m_data_width = tmp;

        ret = device_property_read_u32_array(dev, "snps,block-size", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
                        return -EINVAL;

                chip->dw->hdata->block_size[tmp] = carr[tmp];
        }

        ret = device_property_read_u32_array(dev, "snps,priority", carr,
                                             chip->dw->hdata->nr_channels);
        if (ret)
                return ret;
        /* Priority value must be programmed within [0:nr_channels-1] range */
        for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
                if (carr[tmp] >= chip->dw->hdata->nr_channels)
                        return -EINVAL;

                chip->dw->hdata->priority[tmp] = carr[tmp];
        }

        /* axi-max-burst-len is an optional property */
        ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
        if (!ret) {
                if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
                        return -EINVAL;
                if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
                        return -EINVAL;

                chip->dw->hdata->restrict_axi_burst_len = true;
                chip->dw->hdata->axi_rw_burst_len = tmp - 1;
        }

        return 0;
}

static int dw_probe(struct platform_device *pdev)
{
        struct axi_dma_chip *chip;
        struct resource *mem;
        struct dw_axi_dma *dw;
        struct dw_axi_dma_hcfg *hdata;
        u32 i;
        int ret;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
        if (!dw)
                return -ENOMEM;

        hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
        if (!hdata)
                return -ENOMEM;

        chip->dw = dw;
        chip->dev = &pdev->dev;
        chip->dw->hdata = hdata;

        chip->irq = platform_get_irq(pdev, 0);
        if (chip->irq < 0)
                return chip->irq;

        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        chip->regs = devm_ioremap_resource(chip->dev, mem);
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);

        chip->core_clk = devm_clk_get(chip->dev, "core-clk");
        if (IS_ERR(chip->core_clk))
                return PTR_ERR(chip->core_clk);

        chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
        if (IS_ERR(chip->cfgr_clk))
                return PTR_ERR(chip->cfgr_clk);

        ret = parse_device_properties(chip);
        if (ret)
                return ret;

        dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
                                sizeof(*dw->chan), GFP_KERNEL);
        if (!dw->chan)
                return -ENOMEM;

        ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
                               IRQF_SHARED, KBUILD_MODNAME, chip);
        if (ret)
                return ret;

        /* LLI address must be aligned to a 64-byte boundary */
        dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
                                         sizeof(struct axi_dma_desc), 64, 0);
        if (!dw->desc_pool) {
                dev_err(chip->dev, "No memory for descriptors dma pool\n");
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < hdata->nr_channels; i++) {
                struct axi_dma_chan *chan = &dw->chan[i];

                chan->chip = chip;
                chan->id = i;
                chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
                atomic_set(&chan->descs_allocated, 0);

                chan->vc.desc_free = vchan_desc_put;
                vchan_init(&chan->vc, &dw->dma);
        }

        /* Set capabilities */
        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

        /* DMA capabilities */
        dw->dma.chancnt = hdata->nr_channels;
        dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
        dw->dma.directions = BIT(DMA_MEM_TO_MEM);
        dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        dw->dma.dev = chip->dev;
        dw->dma.device_tx_status = dma_chan_tx_status;
        dw->dma.device_issue_pending = dma_chan_issue_pending;
        dw->dma.device_terminate_all = dma_chan_terminate_all;
        dw->dma.device_pause = dma_chan_pause;
        dw->dma.device_resume = dma_chan_resume;

        dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

        dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

        platform_set_drvdata(pdev, chip);

        pm_runtime_enable(chip->dev);

        /*
         * We can't just call pm_runtime_get here instead of
         * pm_runtime_get_noresume + axi_dma_resume because we need the
         * driver to work even without Runtime PM.
         */
        pm_runtime_get_noresume(chip->dev);
        ret = axi_dma_resume(chip);
        if (ret < 0)
                goto err_pm_disable;

        axi_dma_hw_init(chip);

        pm_runtime_put(chip->dev);

        ret = dma_async_device_register(&dw->dma);
        if (ret)
                goto err_pm_disable;

        dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
                 dw->hdata->nr_channels);

        return 0;

err_pm_disable:
        pm_runtime_disable(chip->dev);

        return ret;
}

static int dw_remove(struct platform_device *pdev)
{
        struct axi_dma_chip *chip = platform_get_drvdata(pdev);
        struct dw_axi_dma *dw = chip->dw;
        struct axi_dma_chan *chan, *_chan;
        u32 i;

        /* Enable clocks before accessing the registers */
        clk_prepare_enable(chip->cfgr_clk);
        clk_prepare_enable(chip->core_clk);
        axi_dma_irq_disable(chip);
        for (i = 0; i < dw->hdata->nr_channels; i++) {
                axi_chan_disable(&chip->dw->chan[i]);
                axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
        }
        axi_dma_disable(chip);

        pm_runtime_disable(chip->dev);
        axi_dma_suspend(chip);

        devm_free_irq(chip->dev, chip->irq, chip);

        list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
                        vc.chan.device_node) {
                list_del(&chan->vc.chan.device_node);
                tasklet_kill(&chan->vc.task);
        }

        dma_async_device_unregister(&dw->dma);

        return 0;
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
        { .compatible = "snps,axi-dma-1.01a" },
        {}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
        .probe          = dw_probe,
        .remove         = dw_remove,
        .driver = {
                .name   = KBUILD_MODNAME,
                .of_match_table = of_match_ptr(dw_dma_of_id_table),
                .pm = &dw_axi_dma_pm_ops,
        },
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");